code stringlengths 81 54k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_a : List[str] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-classification/requirements.txt')
_a : Union[str, Any] = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
_a : Any = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def a_ ( __magic_name__ ) -> List[str]:
"""simple docstring"""
with open(__magic_name__ , '''rb''' ) as f:
snake_case : List[str] = Image.open(__magic_name__ )
return im.convert('''RGB''' )
@dataclass
class a_ :
A__ : Optional[str] = field(
default=a , metadata={
'help': 'Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'
} , )
A__ : Optional[str] = field(
default=a , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
A__ : Optional[str] = field(default=a , metadata={'help': 'A folder containing the training data.'} )
A__ : Optional[str] = field(default=a , metadata={'help': 'A folder containing the validation data.'} )
A__ : Optional[float] = field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
A__ : Optional[int] = field(
default=a , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
A__ : Optional[int] = field(
default=a , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def lowerCAmelCase( self : Union[str, Any] ):
"""simple docstring"""
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
'''You must specify either a dataset name from the hub or a train and/or validation directory.''' )
@dataclass
class a_ :
A__ : str = field(
default='google/vit-base-patch16-224-in21k' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
A__ : Optional[str] = field(
default=a , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(a )} , )
A__ : Optional[str] = field(
default=a , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
A__ : Optional[str] = field(
default=a , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
A__ : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
A__ : str = field(default=a , metadata={'help': 'Name or path of preprocessor config.'} )
A__ : bool = field(
default=a , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
A__ : bool = field(
default=a , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def a_ ( __magic_name__ ) -> List[Any]:
"""simple docstring"""
snake_case : str = torch.stack([example['''pixel_values'''] for example in examples] )
snake_case : List[Any] = torch.tensor([example['''labels'''] for example in examples] )
return {"pixel_values": pixel_values, "labels": labels}
def a_ ( ) -> Dict:
"""simple docstring"""
snake_case : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
snake_case , snake_case , snake_case : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
snake_case , snake_case , snake_case : Tuple = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_image_classification''' , __magic_name__ , __magic_name__ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
snake_case : Union[str, Any] = training_args.get_process_log_level()
logger.setLevel(__magic_name__ )
transformers.utils.logging.set_verbosity(__magic_name__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
snake_case : Optional[int] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
snake_case : Dict = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
snake_case : Tuple = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task='''image-classification''' , use_auth_token=True if model_args.use_auth_token else None , )
else:
snake_case : Optional[Any] = {}
if data_args.train_dir is not None:
snake_case : str = os.path.join(data_args.train_dir , '''**''' )
if data_args.validation_dir is not None:
snake_case : Dict = os.path.join(data_args.validation_dir , '''**''' )
snake_case : Tuple = load_dataset(
'''imagefolder''' , data_files=__magic_name__ , cache_dir=model_args.cache_dir , task='''image-classification''' , )
# If we don't have a validation split, split off a percentage of train as validation.
snake_case : List[Any] = None if '''validation''' in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , __magic_name__ ) and data_args.train_val_split > 0.0:
snake_case : List[str] = dataset['''train'''].train_test_split(data_args.train_val_split )
snake_case : List[Any] = split['''train''']
snake_case : List[str] = split['''test''']
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
snake_case : List[str] = dataset['''train'''].features['''labels'''].names
snake_case , snake_case : Dict = {}, {}
for i, label in enumerate(__magic_name__ ):
snake_case : Dict = str(__magic_name__ )
snake_case : Tuple = label
# Load the accuracy metric from the datasets package
snake_case : Optional[int] = evaluate.load('''accuracy''' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(__magic_name__ ):
return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
snake_case : Any = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(__magic_name__ ) , labelaid=__magic_name__ , idalabel=__magic_name__ , finetuning_task='''image-classification''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
snake_case : Tuple = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__magic_name__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
snake_case : str = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
snake_case : Any = image_processor.size['''shortest_edge''']
else:
snake_case : Tuple = (image_processor.size['''height'''], image_processor.size['''width'''])
snake_case : Union[str, Any] = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
snake_case : List[Any] = Compose(
[
RandomResizedCrop(__magic_name__ ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
snake_case : Optional[Any] = Compose(
[
Resize(__magic_name__ ),
CenterCrop(__magic_name__ ),
ToTensor(),
normalize,
] )
def train_transforms(__magic_name__ ):
snake_case : Optional[int] = [
_train_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image''']
]
return example_batch
def val_transforms(__magic_name__ ):
snake_case : Union[str, Any] = [_val_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image''']]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError('''--do_train requires a train dataset''' )
if data_args.max_train_samples is not None:
snake_case : Optional[Any] = (
dataset['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(__magic_name__ )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError('''--do_eval requires a validation dataset''' )
if data_args.max_eval_samples is not None:
snake_case : Dict = (
dataset['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(__magic_name__ )
# Initalize our trainer
snake_case : Dict = Trainer(
model=__magic_name__ , args=__magic_name__ , train_dataset=dataset['''train'''] if training_args.do_train else None , eval_dataset=dataset['''validation'''] if training_args.do_eval else None , compute_metrics=__magic_name__ , tokenizer=__magic_name__ , data_collator=__magic_name__ , )
# Training
if training_args.do_train:
snake_case : Union[str, Any] = None
if training_args.resume_from_checkpoint is not None:
snake_case : Tuple = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
snake_case : List[Any] = last_checkpoint
snake_case : Dict = trainer.train(resume_from_checkpoint=__magic_name__ )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
snake_case : List[Any] = trainer.evaluate()
trainer.log_metrics('''eval''' , __magic_name__ )
trainer.save_metrics('''eval''' , __magic_name__ )
# Write model card and (optionally) push to hub
snake_case : List[str] = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''image-classification''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''image-classification''', '''vision'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**__magic_name__ )
else:
trainer.create_model_card(**__magic_name__ )
if __name__ == "__main__":
main()
| 84 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_a : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
_a : Dict = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... 
prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def a_ ( __magic_name__ , __magic_name__ , __magic_name__=8 ) -> str:
"""simple docstring"""
snake_case : List[Any] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
snake_case : Tuple = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
class a_ ( a ):
def __init__( self : Optional[int] , UpperCAmelCase__ : UNetaDConditionModel , UpperCAmelCase__ : DDPMScheduler , UpperCAmelCase__ : VQModel , ):
"""simple docstring"""
super().__init__()
self.register_modules(
unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ , movq=UpperCAmelCase__ , )
snake_case : List[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCAmelCase( self : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any ):
"""simple docstring"""
if latents is None:
snake_case : int = randn_tensor(UpperCAmelCase__ , generator=UpperCAmelCase__ , device=UpperCAmelCase__ , dtype=UpperCAmelCase__ )
else:
if latents.shape != shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}" )
snake_case : Optional[Any] = latents.to(UpperCAmelCase__ )
snake_case : List[Any] = latents * scheduler.init_noise_sigma
return latents
def lowerCAmelCase( self : Dict , UpperCAmelCase__ : Optional[int]=0 ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
snake_case : Union[str, Any] = torch.device(F"cuda:{gpu_id}" )
snake_case : Dict = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCAmelCase( self : List[Any] , UpperCAmelCase__ : Any=0 ):
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
snake_case : Optional[int] = torch.device(F"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=UpperCAmelCase__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
snake_case : List[str] = None
for cpu_offloaded_model in [self.unet, self.movq]:
snake_case , snake_case : Optional[int] = cpu_offload_with_hook(UpperCAmelCase__ , UpperCAmelCase__ , prev_module_hook=UpperCAmelCase__ )
# We'll offload the last model manually.
snake_case : Tuple = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase( self : Union[str, Any] ):
"""simple docstring"""
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(UpperCAmelCase__ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(UpperCAmelCase__ )
def __call__( self : List[str] , UpperCAmelCase__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCAmelCase__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCAmelCase__ : torch.FloatTensor , UpperCAmelCase__ : int = 512 , UpperCAmelCase__ : int = 512 , UpperCAmelCase__ : int = 100 , UpperCAmelCase__ : float = 4.0 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase__ : Optional[torch.FloatTensor] = None , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , ):
"""simple docstring"""
snake_case : Optional[int] = self._execution_device
snake_case : Union[str, Any] = guidance_scale > 1.0
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
snake_case : Any = torch.cat(UpperCAmelCase__ , dim=0 )
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
snake_case : Union[str, Any] = torch.cat(UpperCAmelCase__ , dim=0 )
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
snake_case : int = torch.cat(UpperCAmelCase__ , dim=0 )
snake_case : List[Any] = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
snake_case : Dict = image_embeds.repeat_interleave(UpperCAmelCase__ , dim=0 )
snake_case : Optional[Any] = negative_image_embeds.repeat_interleave(UpperCAmelCase__ , dim=0 )
snake_case : Tuple = hint.repeat_interleave(UpperCAmelCase__ , dim=0 )
snake_case : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCAmelCase__ )
snake_case : List[str] = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCAmelCase__ )
self.scheduler.set_timesteps(UpperCAmelCase__ , device=UpperCAmelCase__ )
snake_case : str = self.scheduler.timesteps
snake_case : Optional[Any] = self.movq.config.latent_channels
snake_case , snake_case : Optional[Any] = downscale_height_and_width(UpperCAmelCase__ , UpperCAmelCase__ , self.movq_scale_factor )
# create initial latent
snake_case : Dict = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , self.scheduler , )
for i, t in enumerate(self.progress_bar(UpperCAmelCase__ ) ):
# expand the latents if we are doing classifier free guidance
snake_case : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
snake_case : Optional[int] = {'''image_embeds''': image_embeds, '''hint''': hint}
snake_case : Any = self.unet(
sample=UpperCAmelCase__ , timestep=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , added_cond_kwargs=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , )[0]
if do_classifier_free_guidance:
snake_case , snake_case : Dict = noise_pred.split(latents.shape[1] , dim=1 )
snake_case , snake_case : Any = noise_pred.chunk(2 )
snake_case , snake_case : Dict = variance_pred.chunk(2 )
snake_case : str = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
snake_case : List[str] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , '''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
snake_case , snake_case : Tuple = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
snake_case : List[Any] = self.scheduler.step(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , generator=UpperCAmelCase__ , )[0]
# post-processing
snake_case : List[Any] = self.movq.decode(UpperCAmelCase__ , force_not_quantize=UpperCAmelCase__ )['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
snake_case : Optional[Any] = image * 0.5 + 0.5
snake_case : int = image.clamp(0 , 1 )
snake_case : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
snake_case : str = self.numpy_to_pil(UpperCAmelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCAmelCase__ )
| 84 | 1 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class a_ ( a ):
def __init__( self : Optional[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict=13 , UpperCAmelCase__ : Union[str, Any]=7 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Dict=False , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : int=99 , UpperCAmelCase__ : List[Any]=32 , UpperCAmelCase__ : Any=5 , UpperCAmelCase__ : Dict=4 , UpperCAmelCase__ : Union[str, Any]=37 , UpperCAmelCase__ : str="gelu" , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : str=0.1 , UpperCAmelCase__ : Dict=512 , UpperCAmelCase__ : List[Any]=16 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : int=0.02 , UpperCAmelCase__ : Tuple=3 , UpperCAmelCase__ : int=4 , UpperCAmelCase__ : List[Any]=None , ):
"""simple docstring"""
snake_case : Optional[int] = parent
snake_case : Union[str, Any] = batch_size
snake_case : Dict = seq_length
snake_case : int = is_training
snake_case : List[str] = use_input_mask
snake_case : Optional[int] = use_token_type_ids
snake_case : Optional[Any] = use_labels
snake_case : List[Any] = vocab_size
snake_case : Dict = hidden_size
snake_case : Dict = num_hidden_layers
snake_case : List[Any] = num_attention_heads
snake_case : str = intermediate_size
snake_case : Optional[int] = hidden_act
snake_case : str = hidden_dropout_prob
snake_case : Optional[Any] = attention_probs_dropout_prob
snake_case : Optional[int] = max_position_embeddings
snake_case : List[Any] = type_vocab_size
snake_case : List[str] = type_sequence_label_size
snake_case : Any = initializer_range
snake_case : int = num_labels
snake_case : str = num_choices
snake_case : List[Any] = scope
def lowerCAmelCase( self : Optional[Any] ):
"""simple docstring"""
snake_case : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case : List[Any] = None
if self.use_input_mask:
snake_case : Dict = random_attention_mask([self.batch_size, self.seq_length] )
snake_case : Dict = None
snake_case : str = None
snake_case : List[str] = None
if self.use_labels:
snake_case : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
snake_case : List[str] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase( self : Any ):
"""simple docstring"""
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def lowerCAmelCase( self : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[Any] ):
"""simple docstring"""
snake_case : List[Any] = DistilBertModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
snake_case : int = model(UpperCAmelCase__ , UpperCAmelCase__ )
snake_case : Any = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase( self : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[Any] ):
"""simple docstring"""
snake_case : List[Any] = DistilBertForMaskedLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
snake_case : int = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase( self : str , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : int ):
"""simple docstring"""
snake_case : Tuple = DistilBertForQuestionAnswering(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
snake_case : Any = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict ):
"""simple docstring"""
snake_case : Dict = self.num_labels
snake_case : Optional[Any] = DistilBertForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
snake_case : Optional[int] = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase( self : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] ):
"""simple docstring"""
snake_case : Tuple = self.num_labels
snake_case : Any = DistilBertForTokenClassification(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
snake_case : int = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase( self : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : str ):
"""simple docstring"""
snake_case : Optional[Any] = self.num_choices
snake_case : int = DistilBertForMultipleChoice(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
snake_case : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case : str = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case : Any = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase( self : List[str] ):
"""simple docstring"""
snake_case : Optional[int] = self.prepare_config_and_inputs()
((snake_case) , (snake_case) , (snake_case) , (snake_case) , (snake_case) , (snake_case)) : Union[str, Any] = config_and_inputs
snake_case : Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class a_ ( a , a , unittest.TestCase ):
    """Shared-suite tests for the DistilBert PyTorch models.

    The two ``a`` bases are the obfuscated ModelTesterMixin / PipelineTesterMixin.
    NOTE(review): in the original every class attribute was renamed to ``A__``
    (so later definitions clobbered earlier ones) and every method to
    ``lowerCAmelCase`` (so only the last survived and unittest discovered no
    tests).  The canonical mixin attribute names and ``test_*`` method names
    are restored below — confirm against the mixins.
    """

    # Model classes exercised by the generic tests (None when torch is absent).
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    # pipeline task name -> model class, used by the pipeline test mixin.
    pipeline_model_mapping = (
        {
            'feature-extraction': DistilBertModel,
            'fill-mask': DistilBertForMaskedLM,
            'question-answering': DistilBertForQuestionAnswering,
            'text-classification': DistilBertForSequenceClassification,
            'token-classification': DistilBertForTokenClassification,
            'zero-shot': DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): flag names restored from upstream; the obfuscated source
    # only shows four ``True`` values.
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True

    def setUp( self ):
        """Create the model tester and config tester used by every test."""
        self.model_tester = DistilBertModelTester(self )
        # NOTE(review): the config class argument was lost in obfuscation;
        # DistilBertConfig must be imported at the top of the file.
        self.config_tester = ConfigTester(self , config_class=DistilBertConfig , dim=37 )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_distilbert_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs )

    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs )

    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs )

    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs )

    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        """Smoke-test loading the first published checkpoint."""
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )

    @slow
    @require_torch_gpu
    def test_torchscript_device_change( self ):
        """Trace a model on CPU, save it, and reload it onto another device."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config )
            inputs_dict = self._prepare_for_class(inputs_dict , model_class )
            traced_model = torch.jit.trace(
                model , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model , os.path.join(tmp , 'traced_model.pt' ) )
                # NOTE(review): the reload device was lost in obfuscation;
                # upstream uses ``torch_device`` from transformers.testing_utils.
                loaded = torch.jit.load(os.path.join(tmp , 'traced_model.pt' ) , map_location=torch_device )
                loaded(inputs_dict['input_ids'].to(torch_device ) , inputs_dict['attention_mask'].to(torch_device ) )
@require_torch
class a_ ( unittest.TestCase ):
    """Integration test running the published DistilBert checkpoint."""

    @slow
    def test_inference_no_head( self ):
        """Check the base model's hidden states against recorded values.

        NOTE(review): the obfuscated original referenced the undefined name
        ``UpperCAmelCase__`` throughout; locals are restored and the method is
        renamed ``test_*`` so unittest actually discovers it.
        """
        model = DistilBertModel.from_pretrained('distilbert-base-uncased' )
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape , expected_shape )
        # Reference values recorded from the released checkpoint.
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) )
| 84 |
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# Path to the tiny SentencePiece fixture model used by the tokenizer tests below.
_a : List[Any] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class a_ ( a , unittest.TestCase ):
    # NOTE(review): machine-obfuscated tokenizer test class (presumably the
    # Reformer tokenization tests).  Every method was renamed to
    # ``lowerCAmelCase`` (later defs shadow earlier ones, so unittest runs
    # almost none of these) and many bodies read the unbound name
    # ``UpperCAmelCase__`` or assign to throwaway ``snake_case`` locals while
    # reading the original local names.  The code is kept byte-for-byte as
    # found — only comments/docstrings are added here.
    A__ : Dict = ReformerTokenizer
    A__ : Optional[int] = ReformerTokenizerFast
    A__ : str = True
    A__ : Tuple = False
    A__ : str = True

    def lowerCAmelCase( self : List[Any] ):
        """Save a slow tokenizer into the test tmpdir for the shared tests."""
        super().setUp()
        # NOTE(review): ``UpperCAmelCase__`` is unbound here — presumably the
        # fixture path (``_a``) and ``keep_accents=True`` were intended; and
        # ``tokenizer`` below reads the pre-obfuscation local name.
        snake_case : str = ReformerTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ )
        tokenizer.save_pretrained(self.tmpdirname )

    def lowerCAmelCase( self : Any ):
        """Round-trip the ``<s>`` token through token<->id conversion."""
        snake_case : int = '''<s>'''
        snake_case : List[Any] = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ )

    def lowerCAmelCase( self : Optional[Any] ):
        """Spot-check the fixture vocabulary contents and size."""
        snake_case : Any = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''<unk>''' )
        self.assertEqual(vocab_keys[1] , '''<s>''' )
        self.assertEqual(vocab_keys[-1] , '''j''' )
        self.assertEqual(len(UpperCAmelCase__ ) , 1_000 )

    def lowerCAmelCase( self : List[Any] ):
        """The fixture vocabulary has exactly 1000 entries."""
        self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )

    def lowerCAmelCase( self : Dict ):
        """Slow and fast (Rust) tokenizers must produce identical outputs."""
        if not self.test_rust_tokenizer:
            return
        snake_case : Any = self.get_tokenizer()
        snake_case : str = self.get_rust_tokenizer()
        snake_case : Tuple = '''I was born in 92000, and this is falsé.'''
        snake_case : str = tokenizer.tokenize(UpperCAmelCase__ )
        snake_case : int = rust_tokenizer.tokenize(UpperCAmelCase__ )
        self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
        snake_case : Union[str, Any] = tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
        snake_case : List[str] = rust_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
        self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
        snake_case : List[str] = self.get_rust_tokenizer()
        snake_case : Optional[int] = tokenizer.encode(UpperCAmelCase__ )
        snake_case : Optional[Any] = rust_tokenizer.encode(UpperCAmelCase__ )
        self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )

    def lowerCAmelCase( self : Dict , UpperCAmelCase__ : List[Any]=15 ):
        """Padding to max_length must raise for tokenizers with no pad token."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                snake_case : str = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
                # Simple input
                snake_case : Union[str, Any] = '''This is a simple input'''
                snake_case : List[str] = ['''This is a simple input 1''', '''This is a simple input 2''']
                snake_case : int = ('''This is a simple input''', '''This is a pair''')
                snake_case : int = [
                    ('''This is a simple input 1''', '''This is a simple input 2'''),
                    ('''This is a simple pair 1''', '''This is a simple pair 2'''),
                ]
                # Simple input tests
                # NOTE(review): the expected exception class and the inputs were
                # obfuscated away; ``tokenizer_r`` reads a pre-obfuscation name.
                self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
                # Simple input
                self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
                # Simple input
                self.assertRaises(
                    UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' , )
                # Pair input
                self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
                # Pair input
                self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
                # Pair input
                self.assertRaises(
                    UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' , )

    def lowerCAmelCase( self : str ):
        """No-op: overrides a shared test with an empty body."""
        pass

    def lowerCAmelCase( self : Union[str, Any] ):
        """Tokenize reference strings and compare tokens/ids to fixtures."""
        # NOTE(review): ``UpperCAmelCase__`` is unbound — presumably the fixture
        # path and ``keep_accents=True`` were intended.
        snake_case : Union[str, Any] = ReformerTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ )
        snake_case : List[str] = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [285, 46, 10, 170, 382] , )
        snake_case : int = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            UpperCAmelCase__ , [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''9''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''é''',
                '''.''',
            ] , )
        snake_case : int = tokenizer.convert_tokens_to_ids(UpperCAmelCase__ )
        self.assertListEqual(
            UpperCAmelCase__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        snake_case : List[Any] = tokenizer.convert_ids_to_tokens(UpperCAmelCase__ )
        # Out-of-vocab pieces come back as ``<unk>``.
        self.assertListEqual(
            UpperCAmelCase__ , [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''<unk>''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''<unk>''',
                '''.''',
            ] , )

    @cached_property
    def lowerCAmelCase( self : Tuple ):
        """Load the full pretrained crime-and-punishment tokenizer (cached)."""
        return ReformerTokenizer.from_pretrained('''google/reformer-crime-and-punishment''' )

    @slow
    def lowerCAmelCase( self : List[str] ):
        """Easy-symbol encoding matches the pinned ids."""
        snake_case : Any = '''Hello World!'''
        snake_case : Optional[Any] = [126, 32, 262, 152, 38, 72, 287]
        self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) )

    @slow
    def lowerCAmelCase( self : Optional[Any] ):
        """Hard-symbol encoding (unknowns map to id 0) matches the pinned ids."""
        snake_case : Optional[Any] = (
            '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
            ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
        )
        snake_case : Dict = [
            108,
            265,
            24,
            111,
            4,
            258,
            156,
            35,
            28,
            275,
            3,
            259,
            297,
            260,
            84,
            4,
            35,
            110,
            44,
            8,
            259,
            91,
            268,
            21,
            11,
            209,
            274,
            109,
            266,
            277,
            117,
            86,
            93,
            315,
            258,
            278,
            258,
            277,
            258,
            0,
            258,
            288,
            258,
            319,
            258,
            0,
            258,
            0,
            258,
            0,
            258,
            0,
            258,
            287,
            258,
            315,
            258,
            289,
            258,
            278,
            99,
            269,
            266,
            262,
            8,
            259,
            241,
            4,
            217,
            230,
            268,
            266,
            55,
            168,
            106,
            75,
            193,
            266,
            223,
            27,
            49,
            26,
            282,
            25,
            264,
            299,
            19,
            26,
            0,
            258,
            277,
            117,
            86,
            93,
            176,
            183,
            270,
            11,
            262,
            42,
            61,
            265,
        ]
        self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) )

    @require_torch
    @slow
    def lowerCAmelCase( self : List[Any] ):
        """Feed tokenizer output straight into a small ReformerModel."""
        import torch
        from transformers import ReformerConfig, ReformerModel

        # Build sequence
        snake_case : Any = list(self.big_tokenizer.get_vocab().keys() )[:10]
        snake_case : Union[str, Any] = ''' '''.join(UpperCAmelCase__ )
        snake_case : Optional[int] = self.big_tokenizer.encode_plus(UpperCAmelCase__ , return_tensors='''pt''' )
        snake_case : List[str] = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors='''pt''' )
        snake_case : Optional[Any] = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        snake_case : Tuple = encoded_sequence['''input_ids'''].shape
        snake_case : List[Any] = ReformerModel(UpperCAmelCase__ )
        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**UpperCAmelCase__ )
            model(**UpperCAmelCase__ )

    @slow
    def lowerCAmelCase( self : Optional[int] ):
        """Run the shared tokenizer-integration utility against pinned outputs."""
        # fmt: off
        snake_case : Tuple = {'''input_ids''': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on
        # This tokenizer does not know some characters like ")".
        # That is the reason why we use very simple texts here.
        # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        snake_case : Tuple = [
            '''This is a very simple sentence.''',
            '''The quick brown fox jumps over the lazy dog.''',
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase__ , model_name='''google/reformer-crime-and-punishment''' , revision='''0e6c3decb8211d49bf881013425dc8b0448b3f5a''' , padding=UpperCAmelCase__ , sequences=UpperCAmelCase__ , )
| 84 | 1 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class a_ ( a ):
    """Combine an OwlViT image processor and a CLIP tokenizer into one processor.

    The base class ``a`` is the obfuscated ProcessorMixin.
    NOTE(review): the original parameter, attribute and method names were
    destroyed by machine obfuscation (duplicate ``UpperCAmelCase__``
    parameters are a SyntaxError, and the ``warnings.warn`` category was an
    unbound name); the canonical names are restored below.
    """

    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'OwlViTImageProcessor'
    tokenizer_class = ('CLIPTokenizer', 'CLIPTokenizerFast')

    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        # ``feature_extractor`` is the deprecated name for ``image_processor``.
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor , tokenizer )

    def __call__( self , text=None , query_images=None , images=None , padding="max_length" , return_tensors="np" , **kwargs ):
        """Tokenize ``text`` and/or preprocess ``images``/``query_images``.

        Returns a BatchEncoding holding ``input_ids``/``attention_mask`` (from
        text), ``query_pixel_values`` (from query images) and/or
        ``pixel_values`` (from images), as ``return_tensors`` tensors.
        """
        if text is None and query_images is None and images is None:
            raise ValueError(
                'You have to specify at least one text or query image or image. All three cannot be none.' )
        if text is not None:
            if isinstance(text , str ) or (isinstance(text , list ) and not isinstance(text[0] , list )):
                encodings = [self.tokenizer(text , padding=padding , return_tensors=return_tensors , **kwargs )]
            elif isinstance(text , list ) and isinstance(text[0] , list ):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max(len(t ) for t in text )
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t ) != max_num_queries:
                        t = t + [' '] * (max_num_queries - len(t ))
                    encoding = self.tokenizer(t , padding=padding , return_tensors=return_tensors , **kwargs )
                    encodings.append(encoding )
            else:
                raise TypeError('Input text should be a string, a list of strings or a nested list of strings' )
            # Concatenate the per-sample encodings along the batch axis in the
            # requested tensor framework.
            if return_tensors == "np":
                input_ids = np.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
                attention_mask = np.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
                attention_mask = jnp.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding['input_ids'] for encoding in encodings] , dim=0 )
                attention_mask = torch.cat([encoding['attention_mask'] for encoding in encodings] , dim=0 )
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding['input_ids'] for encoding in encodings] , axis=0 )
                attention_mask = tf.stack([encoding['attention_mask'] for encoding in encodings] , axis=0 )
            else:
                raise ValueError('Target return tensor type could not be returned' )
            encoding = BatchEncoding()
            encoding['input_ids'] = input_ids
            encoding['attention_mask'] = attention_mask
        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images , return_tensors=return_tensors , **kwargs ).pixel_values
            encoding['query_pixel_values'] = query_pixel_values
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )

    def post_process( self , *args , **kwargs ):
        """Forward to the image processor's ``post_process``."""
        return self.image_processor.post_process(*args , **kwargs )

    def post_process_object_detection( self , *args , **kwargs ):
        """Forward to the image processor's ``post_process_object_detection``."""
        return self.image_processor.post_process_object_detection(*args , **kwargs )

    def post_process_image_guided_detection( self , *args , **kwargs ):
        """Forward to the image processor's ``post_process_image_guided_detection``."""
        return self.image_processor.post_process_image_guided_detection(*args , **kwargs )

    def batch_decode( self , *args , **kwargs ):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def feature_extractor_class( self ):
        """Deprecated alias for ``image_processor_class``."""
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , FutureWarning , )
        return self.image_processor_class

    @property
    def feature_extractor( self ):
        """Deprecated alias for ``image_processor``."""
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , FutureWarning , )
        return self.image_processor
| 84 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def a_ ( __magic_name__ ) -> Tuple:
    """Prepare a PIL image for the diffusion pipeline.

    Resizes to the nearest lower multiple of 32 (Lanczos), scales pixel
    values to [0, 1], moves channels first, and returns a torch tensor
    mapped to [-1, 1].

    NOTE(review): the obfuscated body read the undefined name ``image``
    instead of the parameter, and ``np.floataa`` is the garbled form of
    ``np.float32`` — both restored.
    """
    w, h = __magic_name__.size
    w, h = (x - x % 32 for x in (w, h))  # round down to integer multiples of 32
    image = __magic_name__.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] )
    image = np.array(image ).astype(np.float32 ) / 255.0
    image = image[None].transpose(0 , 3 , 1 , 2 )
    image = torch.from_numpy(image )
    return 2.0 * image - 1.0


# The pipeline below calls this helper by its pre-obfuscation name.
preprocess = a_
class a_ ( a ):
    """Latent-diffusion super-resolution pipeline (VQ-VAE + UNet + scheduler).

    The base class ``a`` is the obfuscated DiffusionPipeline.
    NOTE(review): parameter and local names are restored from the obfuscated
    original, whose duplicated ``UpperCAmelCase__`` parameters were a
    SyntaxError and whose locals were all named ``snake_case``.
    """

    def __init__( self , vqvae : VQModel , unet : UNetaDModel , scheduler : Union[
        DDIMScheduler,
        PNDMScheduler,
        LMSDiscreteScheduler,
        EulerDiscreteScheduler,
        EulerAncestralDiscreteScheduler,
        DPMSolverMultistepScheduler,
    ] , ):
        """Register the VQ-VAE, UNet and noise scheduler as pipeline modules."""
        super().__init__()
        self.register_modules(vqvae=vqvae , unet=unet , scheduler=scheduler )

    @torch.no_grad()
    def __call__( self , image : Union[torch.Tensor, PIL.Image.Image] = None , batch_size : Optional[int] = 1 , num_inference_steps : Optional[int] = 100 , eta : Optional[float] = 0.0 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , ):
        """Upscale ``image`` by running the latent-diffusion denoising loop.

        Returns an ImagePipelineOutput (or a bare tuple if ``return_dict``
        is False) containing the super-resolved image(s).
        """
        if isinstance(image , PIL.Image.Image ):
            batch_size = 1
        elif isinstance(image , torch.Tensor ):
            batch_size = image.shape[0]
        else:
            raise ValueError(F"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image )}" )
        if isinstance(image , PIL.Image.Image ):
            image = preprocess(image )
        height, width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters() ).dtype
        latents = randn_tensor(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
        image = image.to(device=self.device , dtype=latents_dtype )
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs['eta'] = eta
        for t in self.progress_bar(timesteps_tensor ):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image] , dim=1 )
            latents_input = self.scheduler.scale_model_input(latents_input , t )
            # predict the noise residual
            noise_pred = self.unet(latents_input , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_kwargs ).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents ).sample
        image = torch.clamp(image , -1.0 , 1.0 )
        image = image / 2 + 0.5
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
| 84 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class a_ :
    """Builds tiny DPR configs/inputs and checks the three TF DPR models.

    NOTE(review): parameter names are recovered from the right-hand sides of
    the obfuscated ``__init__`` assignments (duplicate ``UpperCAmelCase__``
    parameters were a SyntaxError), and method names from the callers in the
    test class below.
    """

    def __init__(
        self ,
        parent ,
        batch_size=13 ,
        seq_length=7 ,
        is_training=True ,
        use_input_mask=True ,
        use_token_type_ids=True ,
        use_labels=True ,
        vocab_size=99 ,
        hidden_size=32 ,
        num_hidden_layers=2 ,
        num_attention_heads=4 ,
        intermediate_size=37 ,
        hidden_act="gelu" ,
        hidden_dropout_prob=0.1 ,
        attention_probs_dropout_prob=0.1 ,
        max_position_embeddings=512 ,
        type_vocab_size=16 ,
        type_sequence_label_size=2 ,
        initializer_range=0.02 ,
        num_labels=3 ,
        num_choices=4 ,
        scope=None ,
        projection_dim=0 ,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim

    def prepare_config_and_inputs( self ):
        """Build a tiny DPRConfig plus random ids/masks/labels."""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = BertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        config = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_dpr_context_encoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDPRContextEncoder(config=config )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )

    def create_and_check_dpr_question_encoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDPRQuestionEncoder(config=config )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )

    def create_and_check_dpr_reader( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDPRReader(config=config )
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )

    def prepare_config_and_inputs_for_common( self ):
        """Return ``(config, inputs_dict)`` for the shared model tests."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids}
        return config, inputs_dict
@require_tf
class a_ ( a , a , unittest.TestCase ):
    """Shared-suite tests for the TF DPR models (mixins obfuscated to ``a``).

    NOTE(review): colliding ``A__`` attributes and ``lowerCAmelCase`` method
    names restored to the canonical mixin/unittest names — confirm against
    the mixins.
    """

    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {'feature-extraction': TFDPRQuestionEncoder} if is_tf_available() else {}
    # Flags for the shared test mixins (all disabled for DPR).
    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp( self ):
        """Create the model tester and config tester used by every test."""
        self.model_tester = TFDPRModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DPRConfig , hidden_size=37 )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs )

    def test_dpr_question_encoder_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs )

    def test_dpr_reader_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        """Smoke-test loading the first published checkpoint of each model."""
        # NOTE(review): the context-encoder loop is duplicated in the source;
        # kept as-is to preserve the original behavior.
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name )
            self.assertIsNotNone(model )
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name )
            self.assertIsNotNone(model )
        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name )
            self.assertIsNotNone(model )
        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
class a_ ( unittest.TestCase ):
    """Integration test for the published TF DPR question encoder."""

    @slow
    def test_inference_no_head( self ):
        """Check the question encoder's embedding against recorded values.

        NOTE(review): locals restored from the obfuscated original and the
        method renamed ``test_*`` so unittest actually discovers it.
        """
        model = TFDPRQuestionEncoder.from_pretrained('facebook/dpr-question_encoder-single-nq-base' )
        input_ids = tf.constant(
            [[101, 7_592, 1_010, 2_003, 2_026, 3_899, 10_140, 1_029, 102]] )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids )[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.0323_6253,
                    0.1275_3335,
                    0.1681_8509,
                    0.0027_9786,
                    0.389_6933,
                    0.2426_4945,
                    0.217_8971,
                    -0.0233_5227,
                    -0.0848_1959,
                    -0.1432_4117,
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 84 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class a_ ( a ):
    """Tests for RealmRetriever using a temp-dir tokenizer vocab and dummy block records.

    NOTE(review): locals and method names in this dump were mangled; they are restored
    here from the names the code itself reads (``self.tmpdirname``, ``self.get_config()``,
    ``retriever``, ``concat_inputs`` ...).
    """

    def setUp( self ):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''test''',
            '''question''',
            '''this''',
            '''is''',
            '''the''',
            '''first''',
            '''second''',
            '''third''',
            '''fourth''',
            '''fifth''',
            '''record''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        realm_tokenizer_path = os.path.join(self.tmpdirname , '''realm_tokenizer''' )
        os.makedirs(realm_tokenizer_path , exist_ok=True )
        self.vocab_file = os.path.join(realm_tokenizer_path , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )

        realm_block_records_path = os.path.join(self.tmpdirname , '''realm_block_records''' )
        os.makedirs(realm_block_records_path , exist_ok=True )

    def get_tokenizer( self ):
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer''' ) )

    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )

    def get_config( self ):
        config = RealmConfig(num_block_records=self.num_block_records )
        return config

    def get_dummy_dataset( self ):
        dataset = Dataset.from_dict(
            {
                '''id''': ['''0''', '''1'''],
                '''question''': ['''foo''', '''bar'''],
                '''answers''': [['''Foo''', '''Bar'''], ['''Bar''']],
            } )
        return dataset

    def get_dummy_block_records( self ):
        # dtype=object keeps the variable-length byte strings intact — TODO confirm
        # against upstream (the dtype argument was mangled in this dump).
        block_records = np.array(
            [
                b'''This is the first record''',
                b'''This is the second record''',
                b'''This is the third record''',
                b'''This is the fourth record''',
                b'''This is the fifth record''',
                b'''This is a longer longer longer record''',
            ] , dtype=object , )
        return block_records

    def get_dummy_retriever( self ):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
        return retriever

    def test_retrieve( self ):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3] , dtype='''long''' )
        question_input_ids = tokenizer(['''Test question'''] ).input_ids
        answer_ids = tokenizer(
            ['''the fourth'''] , add_special_tokens=False , return_token_type_ids=False , return_attention_mask=False , ).input_ids
        max_length = config.reader_seq_len

        has_answers , start_pos , end_pos , concat_inputs = retriever(
            retrieved_block_ids , question_input_ids , answer_ids=answer_ids , max_length=max_length , return_tensors='''np''' )
        self.assertEqual(len(has_answers ) , 2 )
        self.assertEqual(len(start_pos ) , 2 )
        self.assertEqual(len(end_pos ) , 2 )
        self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
        self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
        self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
        self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , )

    def test_block_has_answer( self ):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5] , dtype='''long''' )
        question_input_ids = tokenizer(['''Test question'''] ).input_ids
        answer_ids = tokenizer(
            ['''the fourth''', '''longer longer'''] , add_special_tokens=False , return_token_type_ids=False , return_attention_mask=False , ).input_ids
        max_length = config.reader_seq_len

        has_answers , start_pos , end_pos , _ = retriever(
            retrieved_block_ids , question_input_ids , answer_ids=answer_ids , max_length=max_length , return_tensors='''np''' )
        self.assertEqual([False, True, True] , has_answers )
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , start_pos )
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , end_pos )

    def test_save_load_pretrained( self ):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
        self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )

        # Test mocked remote path
        with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''' ) as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname , '''realm_block_records''' ) , _REALM_BLOCK_RECORDS_FILENAME )
            retriever = RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''' )
        self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
| 84 | 1 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


# NOTE(review): this dump assigned each piece of the lazy-import table to a throwaway
# name and then passed an undefined ``_import_structure`` to _LazyModule. Restored to
# the standard lazy-module pattern: submodule name -> list of public names it exports,
# mirroring the TYPE_CHECKING branch below.
_import_structure = {'configuration_reformer': ['REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ReformerConfig']}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_reformer'] = ['ReformerTokenizer']

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_reformer_fast'] = ['ReformerTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_reformer'] = [
        'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ReformerAttention',
        'ReformerForMaskedLM',
        'ReformerForQuestionAnswering',
        'ReformerForSequenceClassification',
        'ReformerLayer',
        'ReformerModel',
        'ReformerModelWithLMHead',
        'ReformerPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 84 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


# NOTE(review): restored the standard lazy-module pattern — this dump bound every piece
# of the table to a throwaway name and referenced an undefined ``_import_structure``.
_import_structure = {'configuration_encoder_decoder': ['EncoderDecoderConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_encoder_decoder'] = ['EncoderDecoderModel']

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_encoder_decoder'] = ['TFEncoderDecoderModel']

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_flax_encoder_decoder'] = ['FlaxEncoderDecoderModel']

if TYPE_CHECKING:
    from .configuration_encoder_decoder import EncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encoder_decoder import EncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_encoder_decoder import TFEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 84 | 1 |
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def a_ ( __magic_name__ ) -> Optional[int]: # picklable for multiprocessing
"""simple docstring"""
return x.sum()
def a_ ( __magic_name__ ) -> Optional[int]: # picklable for multiprocessing
"""simple docstring"""
return i + 1
@dataclass
class a_ :
    # NOTE(review): both fields are declared under the same mangled name ``A__`` — the
    # second annotation overrides the first, leaving a single str field. Usage later in
    # this file (``A(x=1, y='foobar')``) suggests the intended shape is a class ``A``
    # with fields ``x: int`` and ``y: str``; restore the original names before use.
    A__ : int
    A__ : str
class a_ ( a ):
    # NOTE(review): identifiers in this dump are machine-mangled — every local is bound
    # to a throwaway name and many calls reference ``UpperCAmelCase__`` / ``Foo`` /
    # ``foo`` / ``expected_map_nested_sna_int``, which are never defined, so these tests
    # cannot run as written. Code left byte-identical; restore names from upstream
    # datasets' test_py_utils before relying on it.
    def lowerCAmelCase( self : int ):
        """Exercise map_nested over dicts, lists and scalars, serially and with num_proc,
        including numpy leaves and the local-lambda pickling failure case."""
        snake_case : Tuple = {}
        snake_case : Optional[int] = []
        snake_case : Optional[int] = 1
        snake_case : Optional[int] = [1, 2]
        snake_case : List[Any] = {'''a''': 1, '''b''': 2}
        snake_case : Optional[Any] = {'''a''': [1, 2], '''b''': [3, 4]}
        snake_case : Union[str, Any] = {'''a''': {'''1''': 1}, '''b''': 2}
        snake_case : str = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
        snake_case : List[Any] = {}
        snake_case : Optional[Any] = []
        snake_case : Dict = 2
        snake_case : Tuple = [2, 3]
        snake_case : Any = {'''a''': 2, '''b''': 3}
        snake_case : Union[str, Any] = {'''a''': [2, 3], '''b''': [4, 5]}
        snake_case : Union[str, Any] = {'''a''': {'''1''': 2}, '''b''': 3}
        snake_case : Tuple = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
        self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__ ) , UpperCAmelCase__ )
        self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__ ) , UpperCAmelCase__ )
        self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__ ) , UpperCAmelCase__ )
        self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__ ) , UpperCAmelCase__ )
        self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__ ) , UpperCAmelCase__ )
        self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__ ) , UpperCAmelCase__ )
        self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__ ) , UpperCAmelCase__ )
        self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__ ) , UpperCAmelCase__ )
        snake_case : Any = 2
        self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__ , num_proc=UpperCAmelCase__ ) , UpperCAmelCase__ )
        self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__ , num_proc=UpperCAmelCase__ ) , UpperCAmelCase__ )
        self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__ , num_proc=UpperCAmelCase__ ) , UpperCAmelCase__ )
        self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__ , num_proc=UpperCAmelCase__ ) , UpperCAmelCase__ )
        self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__ , num_proc=UpperCAmelCase__ ) , UpperCAmelCase__ )
        self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__ , num_proc=UpperCAmelCase__ ) , UpperCAmelCase__ )
        self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__ , num_proc=UpperCAmelCase__ ) , UpperCAmelCase__ )
        self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__ , num_proc=UpperCAmelCase__ ) , UpperCAmelCase__ )
        snake_case : str = {'''a''': np.eye(2 ), '''b''': np.zeros(3 ), '''c''': np.ones(2 )}
        snake_case : Dict = {'''a''': 2, '''b''': 0, '''c''': 2}
        snake_case : int = {
            '''a''': np.eye(2 ).astype(UpperCAmelCase__ ),
            '''b''': np.zeros(3 ).astype(UpperCAmelCase__ ),
            '''c''': np.ones(2 ).astype(UpperCAmelCase__ ),
        }
        self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__ , map_numpy=UpperCAmelCase__ ) , UpperCAmelCase__ )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(UpperCAmelCase__ , UpperCAmelCase__ , map_numpy=UpperCAmelCase__ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__ , map_numpy=UpperCAmelCase__ , num_proc=UpperCAmelCase__ ) , UpperCAmelCase__ )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(UpperCAmelCase__ , UpperCAmelCase__ , map_numpy=UpperCAmelCase__ , num_proc=UpperCAmelCase__ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        with self.assertRaises(UpperCAmelCase__ ):  # can't pickle a local lambda
            map_nested(lambda UpperCAmelCase__ : x + 1 , UpperCAmelCase__ , num_proc=UpperCAmelCase__ )
    def lowerCAmelCase( self : Any ):
        """zip_dict should pair each key's values across multiple dicts."""
        snake_case : Dict = {'''a''': 1, '''b''': 2}
        snake_case : Any = {'''a''': 3, '''b''': 4}
        snake_case : Tuple = {'''a''': 5, '''b''': 6}
        snake_case : List[str] = sorted([('''a''', (1, 3, 5)), ('''b''', (2, 4, 6))] )
        self.assertEqual(sorted(zip_dict(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) ) , UpperCAmelCase__ )
    def lowerCAmelCase( self : List[str] ):
        """temporary_assignment should set an attribute inside the context and restore it after."""
        class a_ :
            A__ : str = 'bar'
        snake_case : Any = Foo()
        self.assertEqual(foo.my_attr , '''bar''' )
        with temporary_assignment(UpperCAmelCase__ , '''my_attr''' , '''BAR''' ):
            self.assertEqual(foo.my_attr , '''BAR''' )
        self.assertEqual(foo.my_attr , '''bar''' )
@pytest.mark.parametrize(
    '''iterable_length, num_proc, expected_num_proc''' , [
        (1, None, 1),
        (1, 1, 1),
        (2, None, 1),
        (2, 1, 1),
        (2, 2, 1),
        (2, 3, 1),
        (3, 2, 1),
        (16, 16, 16),
        (16, 17, 16),
        (17, 16, 16),
    ] , )
def a_ ( iterable_length , num_proc , expected_num_proc ) -> Optional[int]:
    """map_nested should only go parallel when the input is >= parallel_min_length and num_proc > 1.

    NOTE(review): parameter names restored from the parametrize spec above; the mangled
    signature repeated one name (a SyntaxError) and the body referenced undefined names.
    """
    with patch('''datasets.utils.py_utils._single_map_nested''' ) as mock_single_map_nested, patch(
        '''datasets.parallel.parallel.Pool''' ) as mock_multiprocessing_pool:
        data_struct = {F"{i}": i for i in range(iterable_length )}
        _ = map_nested(lambda x : x + 10 , data_struct , num_proc=num_proc , parallel_min_length=16 )
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class a_ ( a ):
    """temp_seed should make TF / PyTorch / NumPy RNG reproducible inside the context.

    NOTE(review): locals and the framework toggles were mangled to undefined names;
    restored (``set_tensorflow=True`` / ``set_pytorch=True``) and methods renamed
    ``test_*`` so unittest discovers them.
    """

    @require_tf
    def test_tensorflow( self ):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2 )

        def gen_random_output():
            x = tf.random.uniform((1, 3) )
            return model(x ).numpy()

        # Same seed twice -> identical draws; a third, unseeded draw must differ.
        with temp_seed(42 , set_tensorflow=True ):
            out1 = gen_random_output()
        with temp_seed(42 , set_tensorflow=True ):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )

    @require_torch
    def test_torch( self ):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3 , 2 )
            x = torch.rand(1 , 3 )
            return model(x ).detach().numpy()

        with temp_seed(42 , set_pytorch=True ):
            out1 = gen_random_output()
        with temp_seed(42 , set_pytorch=True ):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )

    def test_numpy( self ):
        def gen_random_output():
            return np.random.rand(1 , 3 )

        with temp_seed(42 ):
            out1 = gen_random_output()
        with temp_seed(42 ):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
@pytest.mark.parametrize('''input_data''' , [{}] )
def a_ ( input_data ) -> Union[str, Any]:
    """NestedDataStructure must expose the wrapped object unchanged via ``.data``.

    NOTE(review): parameter/local names restored from the parametrize spec — the
    mangled version asserted on two undefined names.
    """
    output_data = NestedDataStructure(input_data ).data
    assert output_data == input_data
@pytest.mark.parametrize(
    '''data, expected_output''' , [
        ({}, []),
        ([], []),
        ('''foo''', ['''foo''']),
        (['''foo''', '''bar'''], ['''foo''', '''bar''']),
        ([['''foo''', '''bar''']], ['''foo''', '''bar''']),
        ([[['''foo'''], ['''bar''']]], ['''foo''', '''bar''']),
        ([[['''foo'''], '''bar''']], ['''foo''', '''bar''']),
        ({'''a''': 1, '''b''': 2}, [1, 2]),
        ({'''a''': [1, 2], '''b''': [3, 4]}, [1, 2, 3, 4]),
        ({'''a''': [[1, 2]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
        ({'''a''': [[1, 2]], '''b''': [3, 4]}, [1, 2, 3, 4]),
        ({'''a''': [[[1], [2]]], '''b''': [[[3], [4]]]}, [1, 2, 3, 4]),
        ({'''a''': [[[1], [2]]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
        ({'''a''': [[[1], [2]]], '''b''': [3, 4]}, [1, 2, 3, 4]),
        ({'''a''': [[[1], [2]]], '''b''': [3, [4]]}, [1, 2, 3, 4]),
        ({'''a''': {'''1''': 1}, '''b''': 2}, [1, 2]),
        ({'''a''': {'''1''': [1]}, '''b''': 2}, [1, 2]),
        ({'''a''': {'''1''': [1]}, '''b''': [2]}, [1, 2]),
    ] , )
def a_ ( data , expected_output ) -> str:
    """``NestedDataStructure.flatten`` must yield every leaf, in order.

    NOTE(review): the mangled signature declared the same name for both parameters
    (a SyntaxError); names restored from the parametrize spec.
    """
    output = NestedDataStructure(data ).flatten()
    assert output == expected_output
def a_ ( ) -> Tuple:
    """Check dataclass->dict conversion via ``asdict`` (flat and nested), and that a
    non-dataclass element raises.

    NOTE(review): ``A`` is not defined in this file — the dataclass above was mangled
    to ``a_`` with colliding ``A__`` fields; restore its original name/fields
    (``A`` with ``x: int`` / ``y: str``) for this test to run.
    """
    snake_case : List[Any] = A(x=1 , y='''foobar''' )
    snake_case : Optional[Any] = {'''x''': 1, '''y''': '''foobar'''}
    assert asdict(__magic_name__ ) == expected_output
    snake_case : Optional[int] = {'''a''': {'''b''': A(x=10 , y='''foo''' )}, '''c''': [A(x=20 , y='''bar''' )]}
    snake_case : int = {'''a''': {'''b''': {'''x''': 10, '''y''': '''foo'''}}, '''c''': [{'''x''': 20, '''y''': '''bar'''}]}
    assert asdict(__magic_name__ ) == expected_output
    with pytest.raises(__magic_name__ ):
        asdict([1, A(x=10 , y='''foo''' )] )
def a_ ( __magic_name__ ) -> List[str]:
"""simple docstring"""
return text.split()
def a_ ( __magic_name__ ) -> Optional[int]:
"""simple docstring"""
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def a_ ( ) -> Dict:
    """End-to-end checks for iflatmap_unordered over multiprocessing and multiprocess pools.

    NOTE(review): the pool argument was mangled to an undefined name — restored to the
    ``pool`` bound by each ``with`` block. ``_split_text`` and
    ``_aseconds_generator_of_aitems_with_timing`` are the intended names of the two
    module-level helpers above (also mangled in this dump) — confirm against upstream.
    """
    with Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) )
        assert out.count('''hello''' ) == 10
        assert out.count('''there''' ) == 10
        assert len(out ) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) )
        assert out.count('''hello''' ) == 10
        assert out.count('''there''' ) == 10
        assert len(out ) == 20
    # check that we get items as fast as possible
    with Pool(2 ) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{'''content''': '''a'''}, {'''content''': '''b'''}] ):
            assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
            out.append(content )
        assert out.count('''a''' ) == 2
        assert out.count('''b''' ) == 2
        assert len(out ) == 4
| 84 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

# NOTE(review): these four tables were all assigned to a throwaway ``_a`` while the
# tokenizer class below reads them by the canonical names — restored.
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'yjernite/retribert-base-uncased': (
            'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
        ),
    },
    'tokenizer_file': {
        'yjernite/retribert-base-uncased': (
            'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
        ),
    },
}

# Maximum sequence length the positional embeddings of each checkpoint support.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'yjernite/retribert-base-uncased': 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    'yjernite/retribert-base-uncased': {'do_lower_case': True},
}
class a_ ( a ):
    """Fast (tokenizers-backed) RetriBERT tokenizer — BERT-style WordPiece.

    NOTE(review): class attributes and method names were mangled in this dump (all
    fields ``A__`` shadowing each other, all methods ``lowerCAmelCase``, duplicated
    parameter names — a SyntaxError). Restored to the canonical
    PreTrainedTokenizerFast contract that the surrounding library dispatches on.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self ,
        vocab_file=None ,
        tokenizer_file=None ,
        do_lower_case=True ,
        unk_token="[UNK]" ,
        sep_token="[SEP]" ,
        pad_token="[PAD]" ,
        cls_token="[CLS]" ,
        mask_token="[MASK]" ,
        tokenize_chinese_chars=True ,
        strip_accents=None ,
        **kwargs ,
    ):
        super().__init__(
            vocab_file ,
            tokenizer_file=tokenizer_file ,
            do_lower_case=do_lower_case ,
            unk_token=unk_token ,
            sep_token=sep_token ,
            pad_token=pad_token ,
            cls_token=cls_token ,
            mask_token=mask_token ,
            tokenize_chinese_chars=tokenize_chinese_chars ,
            strip_accents=strip_accents ,
            **kwargs ,
        )
        # Rebuild the backend normalizer if its serialized state disagrees with the
        # requested casing / accent-stripping / Chinese-character handling.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        """[CLS] A [SEP] (+ B [SEP] when a second segment is given)."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        """Segment ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        """Persist the WordPiece model files to *save_directory*; returns the written paths."""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 84 | 1 |
def a_ ( __magic_name__ ) -> int:
    """Count the set bits of a non-negative integer (Kernighan's trick:
    ``n &= n - 1`` clears the lowest set bit, so the loop runs once per 1-bit).

    Raises:
        ValueError: if the input is negative or not an int.
    """
    # Fix: the original checked ``isinstance(x, x)`` (always a TypeError) instead of
    # ``isinstance(x, int)``, and decremented an undefined name ``number``.
    if not isinstance(__magic_name__ , int ) or __magic_name__ < 0:
        raise ValueError('''Input must be a non-negative integer''' )
    number = __magic_name__
    count = 0
    while number:
        number &= number - 1
        count += 1
    return count
# Run the doctests embedded in this module when it is executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 84 |
import string
import numpy
def a_ ( a : int , b : int ) -> int:
    """Euclidean greatest common divisor: gcd(0, b) == b.

    NOTE(review): the mangled signature declared the same name for both parameters
    (a SyntaxError) and the recursion referenced undefined names; restored.
    """
    return b if a == 0 else a_(b % a , a )


# The Hill cipher below calls this helper by its descriptive name.
greatest_common_divisor = a_
class a_ :
    """Hill cipher over the 36-character alphabet A-Z0-9.

    NOTE(review): attribute/local names were mangled (the modulus lambda read an
    undefined ``x``, ``to_int`` vectorized an undefined name); restored from the
    call sites (``self.key_string``, ``self.to_int`` ...). Method names restored from
    their in-class call sites (``self.check_determinant()``, ``self.process_text()``,
    ``self.replace_letters()``, ``self.replace_digits()``, ``self.make_decrypt_key()``).
    """

    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters
    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x : x % 36 )
    to_int = numpy.vectorize(round )

    def __init__( self , encrypt_key: numpy.ndarray ):
        self.encrypt_key = self.modulus(encrypt_key )  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters( self , letter: str ) -> int:
        """Map a character of the 36-symbol alphabet to its index."""
        return self.key_string.index(letter )

    def replace_digits( self , num: int ) -> str:
        """Map a (possibly float) index back to its alphabet character."""
        return self.key_string[round(num )]

    def check_determinant( self ):
        """Raise ValueError unless det(key) is coprime with 36 (i.e. the key is invertible mod 36)."""
        det = round(numpy.linalg.det(self.encrypt_key ) )
        if det < 0:
            det = det % len(self.key_string )
        req_l = len(self.key_string )
        if greatest_common_divisor(det , len(self.key_string ) ) != 1:
            msg = (
                F"determinant modular {req_l} of encryption key({det}) "
                F"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg )

    def process_text( self , text: str ) -> str:
        """Keep only alphabet characters and pad with the last char to a multiple of break_key."""
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars ) % self.break_key != 0:
            chars.append(last )
        return "".join(chars )

    def encrypt( self , text: str ) -> str:
        """Encrypt *text* block by block: c = key . v (mod 36)."""
        text = self.process_text(text.upper() )
        encrypted = ""
        for i in range(0 , len(text ) - self.break_key + 1 , self.break_key ):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char ) for char in batch]
            batch_vec = numpy.array([vec] ).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec ) ).T.tolist()[
                0
            ]
            encrypted_batch = "".join(
                self.replace_digits(num ) for num in batch_encrypted )
            encrypted += encrypted_batch
        return encrypted

    def make_decrypt_key( self ):
        """Build the inverse key matrix mod 36 via the modular inverse of det(key)."""
        det = round(numpy.linalg.det(self.encrypt_key ) )
        if det < 0:
            det = det % len(self.key_string )
        det_inv = None
        for i in range(len(self.key_string ) ):
            if (det * i) % len(self.key_string ) == 1:
                det_inv = i
                break
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key )
            * numpy.linalg.inv(self.encrypt_key )
        )
        return self.to_int(self.modulus(inv_key ) )

    def decrypt( self , text: str ) -> str:
        """Decrypt *text* block by block with the inverse key (mod 36)."""
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper() )
        decrypted = ""
        for i in range(0 , len(text ) - self.break_key + 1 , self.break_key ):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char ) for char in batch]
            batch_vec = numpy.array([vec] ).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec ) ).T.tolist()[0]
            decrypted_batch = "".join(
                self.replace_digits(num ) for num in batch_decrypted )
            decrypted += decrypted_batch
        return decrypted
def main() -> None:
    """Interactive driver: read a key matrix, then encrypt or decrypt user text.

    NOTE(review): renamed from the mangled ``a_`` — the ``__main__`` guard below calls
    ``main()``. Locals were also mangled to undefined names and are restored.
    ``HillCipher`` must resolve to the cipher class defined above (its name was
    mangled too — confirm).
    """
    n = int(input('''Enter the order of the encryption key: ''' ) )
    hill_matrix = []

    print('''Enter each row of the encryption key with space separated integers''' )
    for _ in range(n ):
        row = [int(x ) for x in input().split()]
        hill_matrix.append(row )

    hc = HillCipher(numpy.array(hill_matrix ) )

    print('''Would you like to encrypt or decrypt some text? (1 or 2)''' )
    option = input('''\n1. Encrypt\n2. Decrypt\n''' )
    if option == "1":
        text_e = input('''What text would you like to encrypt?: ''' )
        print('''Your encrypted text is:''' )
        print(hc.encrypt(text_e ) )
    elif option == "2":
        text_d = input('''What text would you like to decrypt?: ''' )
        print('''Your decrypted text is:''' )
        print(hc.decrypt(text_d ) )
# Run doctests, then the interactive Hill-cipher driver, when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
| 84 | 1 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_a : Union[str, Any] = logging.getLogger(__name__)
def a_ ( __magic_name__ , __magic_name__ ) -> Optional[int]:
"""simple docstring"""
return (preds == labels).mean()
@dataclass
class a_ :
    """CLI arguments selecting which pretrained model/config/tokenizer to load.

    NOTE(review): all four fields share the mangled name ``A__`` (later annotations
    override earlier ones) and the defaults reference an undefined name ``a`` —
    presumably the original ``None``. Restore distinct names (model_name_or_path,
    config_name, tokenizer_name, cache_dir) before use; the help strings identify
    each field.
    """
    A__ : str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    A__ : Optional[str] = field(
        default=a , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    A__ : Optional[str] = field(
        default=a , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    A__ : Optional[str] = field(
        default=a , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class a_ :
    """CLI arguments describing the multiple-choice task data and preprocessing.

    NOTE(review): all fields share the mangled name ``A__`` and the bool default
    references an undefined ``a`` (presumably ``False``). Restore distinct names
    (task_name, data_dir, max_seq_length, overwrite_cache) before use; the help
    strings identify each field.
    """
    A__ : str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys() )} )
    A__ : str = field(metadata={'help': 'Should contain the data files for the task.'} )
    A__ : int = field(
        default=128 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    A__ : bool = field(
        default=a , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def a_ ( ) -> Dict:
"""simple docstring"""
snake_case : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
snake_case , snake_case , snake_case : Tuple = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , __magic_name__ )
# Set seed
set_seed(training_args.seed )
try:
snake_case : int = processors[data_args.task_name]()
snake_case : List[str] = processor.get_labels()
snake_case : str = len(__magic_name__ )
except KeyError:
raise ValueError('''Task not found: %s''' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case : List[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__magic_name__ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
snake_case : str = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
snake_case : Any = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__magic_name__ , cache_dir=model_args.cache_dir , )
# Get datasets
snake_case : Optional[int] = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=__magic_name__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
snake_case : Any = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=__magic_name__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(__magic_name__ ) -> Dict:
snake_case : str = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(__magic_name__ , p.label_ids )}
# Data collator
snake_case : Dict = DataCollatorWithPadding(__magic_name__ , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
snake_case : List[Any] = Trainer(
model=__magic_name__ , args=__magic_name__ , train_dataset=__magic_name__ , eval_dataset=__magic_name__ , compute_metrics=__magic_name__ , data_collator=__magic_name__ , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
snake_case : Optional[int] = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
snake_case : Optional[Any] = trainer.evaluate()
snake_case : Union[str, Any] = os.path.join(training_args.output_dir , '''eval_results.txt''' )
if trainer.is_world_master():
with open(__magic_name__ , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(''' %s = %s''' , __magic_name__ , __magic_name__ )
writer.write('''%s = %s\n''' % (key, value) )
results.update(__magic_name__ )
return results
def a_ ( __magic_name__ ) -> List[Any]:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 84 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class a_ ( a ):
    """Pipeline tool that produces an English caption for an input image.

    Wraps a BLIP image-captioning checkpoint behind the standard tool
    interface: ``encode`` preprocesses, ``forward`` generates token ids,
    ``decode`` turns them back into text.

    NOTE(review): the original obfuscated source named every class attribute
    ``A__`` and every method ``lowerCAmelCase`` (so each definition clobbered
    the previous one); the names below follow the PipelineTool attribute and
    method contract (default_checkpoint/description/name/model_class/inputs/
    outputs, encode/forward/decode).
    """

    default_checkpoint = 'Salesforce/blip-image-captioning-base'
    description = (
        'This is a tool that generates a description of an image. It takes an input named `image` which should be the '
        'image to caption, and returns a text that contains the description in English.'
    )
    name = 'image_captioner'
    model_class = AutoModelForVisionaSeq
    inputs = ['image']
    outputs = ['text']

    def __init__(self, *args, **kwargs):
        # Fail early if the vision extras (PIL etc.) are not installed.
        requires_backends(self, ['vision'])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        """Preprocess the PIL image into model-ready tensors."""
        return self.pre_processor(images=image, return_tensors='pt')

    def forward(self, inputs):
        """Run autoregressive caption generation on the encoded inputs."""
        return self.model.generate(**inputs)

    def decode(self, outputs):
        """Convert generated token ids into a cleaned-up caption string."""
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
| 84 | 1 |
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
_a : Any = [
'kernels/rwkv/wkv_cuda.cu',
'kernels/rwkv/wkv_op.cpp',
'kernels/deformable_detr/ms_deform_attn.h',
'kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh',
'models/graphormer/algos_graphormer.pyx',
]
def a_ ( __magic_name__ ) -> int:
"""simple docstring"""
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
_a : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('--check_lib', action='store_true', help='Whether to check the build or the actual package.')
_a : Optional[int] = parser.parse_args()
if args.check_lib:
_a : List[Any] = importlib.import_module('transformers')
_a : Union[str, Any] = Path(transformers_module.__file__).parent
else:
_a : Dict = Path.cwd() / 'build/lib/transformers'
if not test_custom_files_are_present(transformers_path):
raise ValueError('The built release does not contain the custom files. Fix this before going further!')
| 84 |
def a_ ( __magic_name__ ) -> bool:
"""simple docstring"""
if p < 2:
raise ValueError('''p should not be less than 2!''' )
elif p == 2:
return True
snake_case : int = 4
snake_case : Optional[Any] = (1 << p) - 1
for _ in range(p - 2 ):
snake_case : Optional[Any] = ((s * s) - 2) % m
return s == 0
if __name__ == "__main__":
    # Demo: 2**7 - 1 = 127 is a Mersenne prime; 2**11 - 1 = 2047 = 23 * 89 is not.
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
print(lucas_lehmer_test(11))
| 84 | 1 |
import heapq as hq
import math
from collections.abc import Iterator
class a_:
    """A graph vertex for the Prim minimum-spanning-tree routines below.

    Attributes:
        id: vertex identifier, stored as a string.
        key: current best connecting-edge weight (used for ordering).
        pi: predecessor vertex in the spanning tree.
        neighbors: list of adjacent vertex objects.
        edges: mapping ``{neighbor id: edge weight}``.
    """

    def __init__(self, id_):
        # The obfuscated source named this parameter ``UpperCAmelCase__`` but
        # read ``id_`` in the body — restored.
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex id: edge weight}

    def __lt__(self, other):
        # Ordering by key lets vertices live in a heap / be passed to min().
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        """Record *vertex* as adjacent to this one."""
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        """Set the weight of the edge towards *vertex*.

        The obfuscated source declared two parameters with the same name
        (a SyntaxError); the body assigned ``weight``, keyed by the
        neighbor's id as the traversal code expects.
        """
        self.edges[vertex.id] = weight
def a_ ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Any:
"""simple docstring"""
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , __magic_name__ )
graph[b - 1].add_edge(graph[a - 1] , __magic_name__ )
def a_ ( __magic_name__ , __magic_name__ ) -> list:
"""simple docstring"""
snake_case : Tuple = []
for u in graph:
snake_case : Dict = math.inf
snake_case : Dict = None
snake_case : List[Any] = 0
snake_case : List[str] = graph[:]
while q:
snake_case : Dict = min(__magic_name__ )
q.remove(__magic_name__ )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
snake_case : Union[str, Any] = u
snake_case : Tuple = u.edges[v.id]
for i in range(1 , len(__magic_name__ ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def a_ ( __magic_name__ , __magic_name__ ) -> Iterator[tuple]:
"""simple docstring"""
for u in graph:
snake_case : Optional[int] = math.inf
snake_case : List[str] = None
snake_case : Optional[Any] = 0
snake_case : Dict = list(__magic_name__ )
hq.heapify(__magic_name__ )
while h:
snake_case : List[Any] = hq.heappop(__magic_name__ )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
snake_case : List[str] = u
snake_case : int = u.edges[v.id]
hq.heapify(__magic_name__ )
for i in range(1 , len(__magic_name__ ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def a_ ( ) -> None:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 84 |
from sklearn.metrics import fa_score
import datasets
_a : List[str] = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'
_a : Dict = '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. 
This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, 
references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n'
_a : List[Any] = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
    """F1 metric backed by scikit-learn's ``f1_score``.

    NOTE(review): the obfuscated source named both methods ``lowerCAmelCase``
    (so the first was unreachable) and gave ``_compute`` six identical
    parameter names (a SyntaxError). The ``datasets.Metric`` contract expects
    subclasses to implement ``_info`` and ``_compute`` — restored below.
    """

    def _info(self):
        # Multilabel configs take sequences of ints per example; every other
        # config takes a single int per example.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    '''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
                    '''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
                }
                if self.config_name == '''multilabel'''
                else {
                    '''predictions''': datasets.Value('''int32''' ),
                    '''references''': datasets.Value('''int32''' ),
                }
            ),
            reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        """Compute the F1 score; returns ``{"f1": float_or_array}``."""
        # sklearn's signature is f1_score(y_true, y_pred, ...), so references
        # (ground truth) come first.
        score = fa_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
| 84 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class a_ ( unittest.TestCase ):
    """Unit tests for the ``DisjunctiveConstraint`` decoding constraint.

    NOTE(review): the obfuscated source named all four test methods
    ``lowerCAmelCase`` (so only the last one survived class creation and was
    collected) and mangled the tuple-unpack locals; distinct ``test_*`` names
    and the ``stepped/completed/reset`` locals the bodies referenced are
    restored below.
    """

    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        constraint_ids = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(constraint_ids)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a preverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        constraint_ids = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(constraint_ids)  # fails here

    def test_example_progression(self):
        constraint_ids = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(constraint_ids)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        constraint_ids = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(constraint_ids)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
| 84 |
def a_ ( __magic_name__ ) -> int:
"""simple docstring"""
if not isinstance(__magic_name__ , __magic_name__ ):
raise TypeError('''only integers accepted as input''' )
else:
snake_case : str = str(abs(__magic_name__ ) )
snake_case : Optional[Any] = [list(__magic_name__ ) for char in range(len(__magic_name__ ) )]
for index in range(len(__magic_name__ ) ):
num_transpositions[index].pop(__magic_name__ )
return max(
int(''''''.join(list(__magic_name__ ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
    # Run the doctest examples embedded in this module.
    __import__('doctest').testmod()
| 84 | 1 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
_a : Tuple = 'true'
def a_ ( __magic_name__ , __magic_name__=82 , __magic_name__=16 ) -> Optional[int]:
"""simple docstring"""
set_seed(42 )
snake_case : int = RegressionModel()
snake_case : Union[str, Any] = deepcopy(__magic_name__ )
snake_case : List[Any] = RegressionDataset(length=__magic_name__ )
snake_case : Optional[int] = DataLoader(__magic_name__ , batch_size=__magic_name__ )
model.to(accelerator.device )
snake_case , snake_case : List[str] = accelerator.prepare(__magic_name__ , __magic_name__ )
return model, ddp_model, dataloader
def a_ ( __magic_name__ , __magic_name__=False ) -> List[Any]:
"""simple docstring"""
snake_case : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''' )
snake_case : int = load_dataset('''glue''' , '''mrpc''' , split='''validation''' )
def tokenize_function(__magic_name__ ):
snake_case : Union[str, Any] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__magic_name__ , max_length=__magic_name__ )
return outputs
with accelerator.main_process_first():
snake_case : str = dataset.map(
__magic_name__ , batched=__magic_name__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
snake_case : int = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__magic_name__ ):
if use_longest:
return tokenizer.pad(__magic_name__ , padding='''longest''' , return_tensors='''pt''' )
return tokenizer.pad(__magic_name__ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return DataLoader(__magic_name__ , shuffle=__magic_name__ , collate_fn=__magic_name__ , batch_size=16 )
def a_ ( __magic_name__ , __magic_name__ ) -> Tuple:
"""simple docstring"""
snake_case : Union[str, Any] = Accelerator(dispatch_batches=__magic_name__ , split_batches=__magic_name__ )
snake_case : Union[str, Any] = get_dataloader(__magic_name__ , not dispatch_batches )
snake_case : Any = AutoModelForSequenceClassification.from_pretrained(
'''hf-internal-testing/mrpc-bert-base-cased''' , return_dict=__magic_name__ )
snake_case , snake_case : Optional[Any] = accelerator.prepare(__magic_name__ , __magic_name__ )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def a_ ( __magic_name__ , __magic_name__ , __magic_name__ ) -> Tuple:
"""simple docstring"""
snake_case : Dict = []
for batch in dataloader:
snake_case , snake_case : Optional[int] = batch.values()
with torch.no_grad():
snake_case : Tuple = model(__magic_name__ )
snake_case , snake_case : str = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
snake_case , snake_case : Any = [], []
for logit, targ in logits_and_targets:
logits.append(__magic_name__ )
targs.append(__magic_name__ )
snake_case , snake_case : List[str] = torch.cat(__magic_name__ ), torch.cat(__magic_name__ )
return logits, targs
def a_ ( __magic_name__ , __magic_name__=82 , __magic_name__=False , __magic_name__=False , __magic_name__=16 ) -> str:
"""simple docstring"""
snake_case , snake_case , snake_case : str = get_basic_setup(__magic_name__ , __magic_name__ , __magic_name__ )
snake_case , snake_case : Optional[int] = generate_predictions(__magic_name__ , __magic_name__ , __magic_name__ )
assert (
len(__magic_name__ ) == num_samples
), F"Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(__magic_name__ )}"
def a_ ( __magic_name__ = False , __magic_name__ = False ) -> Any:
"""simple docstring"""
snake_case : Union[str, Any] = evaluate.load('''glue''' , '''mrpc''' )
snake_case , snake_case : Optional[Any] = get_mrpc_setup(__magic_name__ , __magic_name__ )
# First do baseline
snake_case , snake_case , snake_case : str = setup['''no''']
model.to(__magic_name__ )
model.eval()
for batch in dataloader:
batch.to(__magic_name__ )
with torch.inference_mode():
snake_case : List[str] = model(**__magic_name__ )
snake_case : List[str] = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=__magic_name__ , references=batch['''labels'''] )
snake_case : List[str] = metric.compute()
# Then do distributed
snake_case , snake_case , snake_case : Tuple = setup['''ddp''']
model.eval()
for batch in dataloader:
with torch.inference_mode():
snake_case : Optional[Any] = model(**__magic_name__ )
snake_case : List[Any] = outputs.logits.argmax(dim=-1 )
snake_case : Optional[int] = batch['''labels''']
snake_case , snake_case : Dict = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=__magic_name__ , references=__magic_name__ )
snake_case : List[Any] = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def a_ ( ) -> List[str]:
"""simple docstring"""
snake_case : str = Accelerator(split_batches=__magic_name__ , dispatch_batches=__magic_name__ )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('''**Testing gather_for_metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`" )
test_mrpc(__magic_name__ , __magic_name__ )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test torch metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
snake_case : Dict = Accelerator(split_batches=__magic_name__ , dispatch_batches=__magic_name__ )
if accelerator.is_local_main_process:
print(F"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99" )
test_torch_metrics(__magic_name__ , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test last batch is not dropped when perfectly divisible**''' )
snake_case : str = Accelerator()
test_torch_metrics(__magic_name__ , 512 )
accelerator.state._reset_state()
def a_ ( __magic_name__ ) -> Optional[int]:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 84 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class a_ :
def __init__( self : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any]=99 , UpperCAmelCase__ : Dict=13 , UpperCAmelCase__ : int=7 , UpperCAmelCase__ : Any=9 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Tuple=False , UpperCAmelCase__ : Tuple=32 , UpperCAmelCase__ : Dict=5 , UpperCAmelCase__ : Optional[int]=4 , UpperCAmelCase__ : List[Any]=37 , UpperCAmelCase__ : Union[str, Any]=8 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : str=0.002 , UpperCAmelCase__ : str=1 , UpperCAmelCase__ : Any=0 , UpperCAmelCase__ : Union[str, Any]=0 , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Optional[Any]=None , ):
"""simple docstring"""
snake_case : Union[str, Any] = parent
snake_case : Union[str, Any] = batch_size
snake_case : Any = encoder_seq_length
snake_case : str = decoder_seq_length
# For common tests
snake_case : Optional[int] = self.decoder_seq_length
snake_case : Optional[Any] = is_training
snake_case : List[Any] = use_attention_mask
snake_case : Union[str, Any] = use_labels
snake_case : Any = vocab_size
snake_case : Optional[int] = hidden_size
snake_case : List[str] = num_hidden_layers
snake_case : Union[str, Any] = num_attention_heads
snake_case : Any = d_ff
snake_case : Any = relative_attention_num_buckets
snake_case : Optional[Any] = dropout_rate
snake_case : int = initializer_factor
snake_case : Optional[Any] = eos_token_id
snake_case : Dict = pad_token_id
snake_case : Optional[Any] = decoder_start_token_id
snake_case : Union[str, Any] = None
snake_case : List[str] = decoder_layers
def lowerCAmelCase( self : Union[str, Any] ):
"""simple docstring"""
return TaConfig.from_pretrained('''google/umt5-base''' )
def lowerCAmelCase( self : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : List[Any]=None , ):
"""simple docstring"""
if attention_mask is None:
snake_case : Union[str, Any] = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
snake_case : Any = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
snake_case : List[Any] = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=UpperCAmelCase__ )
if decoder_head_mask is None:
snake_case : Tuple = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=UpperCAmelCase__ )
if cross_attn_head_mask is None:
snake_case : Union[str, Any] = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=UpperCAmelCase__ )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def lowerCAmelCase( self : int ):
"""simple docstring"""
snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
snake_case : List[str] = input_ids.clamp(self.pad_token_id + 1 )
snake_case : List[str] = decoder_input_ids.clamp(self.pad_token_id + 1 )
snake_case : str = self.get_config()
snake_case : Tuple = config.num_attention_heads
snake_case : List[Any] = self.prepare_inputs_dict(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
return config, input_dict
def lowerCAmelCase( self : Dict ):
"""simple docstring"""
snake_case , snake_case : List[str] = self.prepare_config_and_inputs()
return config, inputs_dict
def get_pipeline_config( self : Dict ):
    """T5 config variant for pipeline tests: tiny fixed vocab (166) to keep checkpoints small.

    NOTE(review): originally this method shared the name ``lowerCAmelCase`` with
    every sibling, so later definitions shadowed it; renamed per the
    tester convention (the self.vocab_size variant below is ``get_config``).
    """
    return TaConfig(
        vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def get_config( self : Tuple ):
    """Full-vocab T5 config for the tester.

    NOTE(review): renamed from the duplicated ``lowerCAmelCase`` — the tester
    itself calls ``self.get_config()``, so this name is required for that
    call to resolve.
    """
    return TaConfig(
        vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def create_and_check_model( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels ):
    """Forward the model with and without masks and check output shapes and cache layout.

    NOTE(review): the obfuscated signature declared six parameters all named
    ``UpperCAmelCase__`` (a SyntaxError) and the body read undefined locals;
    restored the conventional tester parameter names.
    """
    model = UMTaModel(config=config)
    model.to(torch_device)
    model.eval()
    result = model(
        input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, )
    result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
    decoder_output = result.last_hidden_state
    decoder_past = result.past_key_values
    encoder_output = result.encoder_last_hidden_state
    self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
    self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
    # There should be `num_layers` key value embeddings stored in decoder_past
    self.parent.assertEqual(len(decoder_past), config.num_layers)
    # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
    self.parent.assertEqual(len(decoder_past[0]), 4)
def create_and_check_decoder_model_past( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels ):
    """Check that cached decoding (past_key_values) matches the uncached forward pass.

    NOTE(review): restored the lost local names and the ``use_cache=True/False``
    literals that the obfuscation replaced with an undefined placeholder.
    """
    model = UMTaModel(config=config).get_decoder().to(torch_device).eval()
    # first forward pass
    outputs = model(input_ids, use_cache=True)
    outputs_use_cache_conf = model(input_ids)
    outputs_no_past = model(input_ids, use_cache=False)
    self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
    self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
    output, past_key_values = outputs.to_tuple()
    # create hypothetical next token and extent to next_input_ids
    next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
    # append to next input_ids and
    next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
    output_from_no_past = model(next_input_ids)['last_hidden_state']
    output_from_past = model(next_tokens, past_key_values=past_key_values)['last_hidden_state']
    # select random slice
    random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
    output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
    output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
    # test that outputs are equal for slice
    self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_model_fpaa_forward( self, config, input_dict ):
    """Run a half-precision forward pass and assert the hidden states contain no NaNs.

    NOTE(review): renamed from the duplicated ``lowerCAmelCase`` — the test class
    calls ``self.model_tester.create_and_check_model_fpaa_forward(...)``, so this
    name is required for that call to resolve.
    """
    model = UMTaModel(config=config).to(torch_device).half().eval()
    output = model(**input_dict)['last_hidden_state']
    self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class a_ ( a , a , a , unittest.TestCase ):
    """Model tests for the UMT5 family.

    NOTE(review): in the obfuscated original every class attribute was named
    ``A__`` (each assignment clobbered the previous) and every method was named
    ``lowerCAmelCase`` (later defs shadowed earlier ones, and unittest never
    discovered them). Restored the attribute/method names the test mixins and
    unittest dispatch on. The duplicate ``a`` base classes are kept as found —
    the mixin names are not visible in this chunk.
    """

    all_model_classes = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'conversational': UMTaForConditionalGeneration,
            'feature-extraction': UMTaModel,
            'summarization': UMTaForConditionalGeneration,
            'text2text-generation': UMTaForConditionalGeneration,
            'translation': UMTaForConditionalGeneration,
            'question-answering': UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        # the tester drives config/input creation for all tests below
        self.model_tester = UMTaModelTester(self)

    @unittest.skip('''Test has a segmentation fault on torch 1.8.0''')
    def test_export_to_onnx(self):
        """Export the model to ONNX (skipped: segfaults on torch 1.8.0)."""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMTaModel(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model, (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]), F"{tmpdirname}/t5_test.onnx", export_params=True, opset_version=9, input_names=['''input_ids''', '''decoder_input_ids'''], )

    @unittest.skipIf(torch_device == '''cpu''', '''Cant do half precision''')
    def test_model_fpaa_forward(self):
        """Half-precision forward pass must not produce NaNs (GPU only)."""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fpaa_forward(*config_and_inputs)

    def test_generate_with_head_masking(self):
        """Generating with an all-zero head mask must yield all-zero attention weights."""
        attention_names = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions''']
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMTaForConditionalGeneration(config).eval()
        model.to(torch_device)
        head_masking = {
            '''head_mask''': torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            '''decoder_head_mask''': torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            '''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }
        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks['''decoder_head_mask'''] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device)
            out = model.generate(
                config_and_inputs[1]['''input_ids'''], num_beams=1, max_length=3, output_attentions=True, return_dict_in_generate=True, **head_masks, )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)

    @unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''')
    def test_disk_offload(self):
        # NOTE(review): method name reconstructed from the skip reason — confirm
        # against the original file.
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class a_ ( unittest.TestCase ):
    """Slow integration test for the google/umt5-small checkpoint.

    NOTE(review): the obfuscated original dropped every assignment target
    (``snake_case : ... = ...``) and then read undefined names such as
    ``input_ids`` and ``filling``; restored the locals. All runtime strings
    and the expected-id tensor are reproduced byte-for-byte.
    """

    @slow
    @unittest.skip(
        '''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' )
    def test_small_integration_test(self):
        model = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''', return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained('''google/umt5-small''', use_fast=False, legacy=False)
        input_text = [
            '''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
            '''No se como puedo <extra_id_0>.''',
            '''This is the reason why we <extra_id_0> them.''',
            '''The <extra_id_0> walks in <extra_id_1>, seats''',
            '''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
        ]
        input_ids = tokenizer(input_text, return_tensors='''pt''', padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [ 38_530, 210_703, 256_299, 1_410, 256_298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 826, 321, 671, 25_922, 256_299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 1_460, 339, 312, 19_014, 10_620, 758, 256_299, 2_355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
                [ 517, 256_299, 14_869, 281, 301, 256_298, 275, 119_983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 320, 256_299, 14_869, 281, 2_234, 289, 2_275, 333,61_391, 289, 256_298, 543, 256_297, 168_714, 329, 256_296,274, 1],
            ] )
        # fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)
        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
            '''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
            '''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
            '''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
            '''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
            '''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
        ]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
| 84 | 1 |
def a_ ( __magic_name__ ) -> int:
    """Return the largest integer obtainable by deleting exactly one digit of the input.

    The sign is discarded (``abs``) before the digits are considered.

    Raises:
        TypeError: if the input is not an ``int``.

    >>> a_(123)
    23
    >>> a_(-123)
    23
    >>> a_(97531)
    9753
    """
    if not isinstance(__magic_name__, int):
        raise TypeError('''only integers accepted as input''')
    num_string = str(abs(__magic_name__))
    # one candidate per digit position, each with that position removed
    candidates = [list(num_string) for _ in range(len(num_string))]
    for index in range(len(candidates)):
        candidates[index].pop(index)
    return max(int(''.join(candidate)) for candidate in candidates)
# Run the module's doctests when executed as a script (no-op on import).
if __name__ == "__main__":
    __import__('doctest').testmod()
| 84 |
import torch
from diffusers import DiffusionPipeline
class a_ ( a ):
    """Minimal custom diffusion pipeline used in loading tests: one denoising step,
    with a deterministic all-ones result.

    NOTE(review): the obfuscated original declared duplicate ``UpperCAmelCase__``
    parameters (a SyntaxError) and clobbered every local; restored distinct names.
    """

    def __init__( self : Optional[Any] , unet : Tuple , scheduler : List[Any] ):
        super().__init__()
        # register_modules exposes the components as self.unet / self.scheduler
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__( self : Optional[int] ):
        # single random latent matching the UNet's expected sample shape
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), )
        timestep = 1
        model_output = self.unet(image, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample
        # the subtraction cancels, so the pipeline deterministically returns all ones
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)
        return result
| 84 | 1 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_a : Optional[Any] = logging.get_logger(__name__)
_a : Any = {
'microsoft/conditional-detr-resnet-50': (
'https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'
),
}
class a_ ( a ):
    """Configuration class for the Conditional DETR model.

    NOTE(review): in the obfuscated original every ``self.x = ...`` assignment
    target was replaced by a throwaway local, so no configuration attribute was
    ever set; the original attribute names were recovered from the surviving
    right-hand sides and restored. Class attributes (all previously named
    ``A__``, shadowing each other) were restored to the names
    ``PretrainedConfig`` machinery reads.
    """

    model_type = 'conditional_detr'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2_048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2_048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''')
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''')
                backbone_config = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''])
            elif isinstance(backbone_config, dict):
                # round-trip a plain dict through the matching config class
                backbone_model_type = backbone_config.get('''model_type''')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        # alias required by the shared `attribute_map`
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        # alias required by the shared `attribute_map`
        return self.d_model

    def to_dict(self):
        """Serialize this config (and any nested backbone config) to a plain dict."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output['''backbone_config'''] = self.backbone_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
class a_ ( a ):
    """ONNX export configuration for Conditional DETR.

    NOTE(review): the obfuscated original named all three properties
    ``lowerCAmelCase`` (only the last survived class creation); restored the
    names the ONNX export machinery dispatches on.
    """

    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # dynamic axes for the two model inputs
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
                ('''pixel_mask''', {0: '''batch'''}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        # absolute tolerance when validating exported outputs
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 84 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class a_ ( a ):
    """Processor bundling a CLIP image processor and a CLIP tokenizer into one object.

    NOTE(review): the obfuscated original dropped the assignment targets
    (e.g. ``encoding['pixel_values'] = ...`` and ``feature_extractor = ...``)
    and passed an undefined placeholder as the warning category; restored the
    locals, the subscript assignment, and ``FutureWarning``. Class attributes
    (previously all named ``A__``) were restored to the names ProcessorMixin
    expects.
    """

    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'CLIPImageProcessor'
    tokenizer_class = ('CLIPTokenizer', 'CLIPTokenizerFast')

    def __init__( self : Union[str, Any] , image_processor : str = None , tokenizer : Union[str, Any] = None , **kwargs : Optional[int] ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''')
        # fall back to the deprecated argument when no image processor was given
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''')
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''')
        super().__init__(image_processor, tokenizer)

    def __call__( self : Any , text : Dict = None , images : Union[str, Any] = None , return_tensors : int = None , **kwargs : Union[str, Any] ):
        """Tokenize `text` and/or preprocess `images`; returns a BatchEncoding."""
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''')
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            # merge the pixel values into the text encoding
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode( self : List[str] , *args : Dict , **kwargs : int ):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode( self : Optional[int] , *args : Optional[Any] , **kwargs : str ):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names( self : Tuple ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        # de-duplicate while preserving order
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class( self : Tuple ):
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
        return self.image_processor_class

    @property
    def feature_extractor( self : Any ):
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , FutureWarning , )
        return self.image_processor
| 84 | 1 |
import os
def a_ ( ) -> Any:
    """Return the first ten digits of the sum of the integers in ``num.txt``
    (one number per line, located next to this script).

    NOTE(review): the obfuscated original read the undefined name
    ``__magic_name__`` for both the module path and the per-line value;
    restored ``__file__`` / ``line``.
    """
    file_path = os.path.join(os.path.dirname(__file__), '''num.txt''')
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
# Script entry point. NOTE(review): the original guard called the undefined
# name `solution()`; the function in this file is named `a_`.
if __name__ == "__main__":
    print(a_())
| 84 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_a : Optional[Any] = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
_a : str = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
_a : List[Any] = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
    """MAUVE metric wrapper around the official `mauve-text` implementation.

    NOTE(review): the obfuscated original named both hooks ``lowerCAmelCase``
    (so ``datasets.Metric`` could not dispatch to them) and returned the
    undefined name ``out``; restored ``_info``/``_compute`` and the binding.
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''' , id='''sequence''' ),
                    '''references''': datasets.Value('''string''' , id='''sequence''' ),
                } ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
                '''https://arxiv.org/abs/2102.01454''',
                '''https://github.com/krishnap25/mauve''',
            ] , )

    def _compute(self, predictions, references, p_features=None, q_features=None, p_tokens=None, q_tokens=None, num_buckets="auto", pca_max_data=-1, kmeans_explained_var=0.9, kmeans_num_redo=5, kmeans_max_iter=500, featurize_model_name="gpt2-large", device_id=-1, max_text_length=1_024, divergence_curve_discretization_size=25, mauve_scaling_factor=5, verbose=True, seed=25, ):
        # predictions/references map onto mauve's p_text/q_text
        out = compute_mauve(
            p_text=predictions , q_text=references , p_features=p_features , q_features=q_features , p_tokens=p_tokens , q_tokens=q_tokens , num_buckets=num_buckets , pca_max_data=pca_max_data , kmeans_explained_var=kmeans_explained_var , kmeans_num_redo=kmeans_num_redo , kmeans_max_iter=kmeans_max_iter , featurize_model_name=featurize_model_name , device_id=device_id , max_text_length=max_text_length , divergence_curve_discretization_size=divergence_curve_discretization_size , mauve_scaling_factor=mauve_scaling_factor , verbose=verbose , seed=seed , )
        return out
| 84 | 1 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
_a : Optional[int] = logging.get_logger(__name__)
_a : Any = ['model.decoder.embed_positions.weights']
def a_ ( __magic_name__ ) -> str:
    """Map a fairseq MusicGen decoder weight name to its HF Transformers equivalent.

    NOTE(review): the obfuscated original discarded every ``replace`` result and
    returned the undefined name ``name``; restored the in-place rebinding chain.
    """
    name = __magic_name__
    if "emb" in name:
        name = name.replace('''emb''' , '''model.decoder.embed_tokens''')
    if "transformer" in name:
        name = name.replace('''transformer''' , '''model.decoder''')
    if "cross_attention" in name:
        name = name.replace('''cross_attention''' , '''encoder_attn''')
    if "linear1" in name:
        name = name.replace('''linear1''' , '''fc1''')
    if "linear2" in name:
        name = name.replace('''linear2''' , '''fc2''')
    if "norm1" in name:
        name = name.replace('''norm1''' , '''self_attn_layer_norm''')
    if "norm_cross" in name:
        name = name.replace('''norm_cross''' , '''encoder_attn_layer_norm''')
    if "norm2" in name:
        name = name.replace('''norm2''' , '''final_layer_norm''')
    if "out_norm" in name:
        name = name.replace('''out_norm''' , '''model.decoder.layer_norm''')
    if "linears" in name:
        name = name.replace('''linears''' , '''lm_heads''')
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace('''condition_provider.conditioners.description.output_proj''' , '''enc_to_dec_proj''')
    return name
def a_ ( state_dict , hidden_size ) -> Tuple[Dict, Dict]:
    """Rename fairseq keys in place and split the fused qkv projections.

    Returns ``(state_dict, enc_dec_proj_state_dict)`` where the second dict
    collects the encoder-to-decoder projection weights (with their prefix
    stripped) so they can be loaded into the composite model separately.

    NOTE(review): the obfuscated signature declared two parameters with the
    same name (a SyntaxError); the second must be ``hidden_size`` — the call
    site passes it by keyword. The renamer is invoked as ``rename_keys`` as in
    the original, although this file defines it under a different name.
    """
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace('''in_proj_weight''' , '''q_proj.weight''')] = val[:hidden_size, :]
            state_dict[key.replace('''in_proj_weight''' , '''k_proj.weight''')] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace('''in_proj_weight''' , '''v_proj.weight''')] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len('''enc_to_dec_proj.''') :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def a_ ( __magic_name__ ) -> MusicgenDecoderConfig:
"""simple docstring"""
if checkpoint == "small":
# default config values
snake_case : Optional[Any] = 1_024
snake_case : str = 24
snake_case : Any = 16
elif checkpoint == "medium":
snake_case : Optional[Any] = 1_536
snake_case : Optional[int] = 48
snake_case : Union[str, Any] = 24
elif checkpoint == "large":
snake_case : Dict = 2_048
snake_case : Optional[Any] = 48
snake_case : str = 32
else:
raise ValueError(F"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}." )
snake_case : str = MusicgenDecoderConfig(
hidden_size=__magic_name__ , ffn_dim=hidden_size * 4 , num_hidden_layers=__magic_name__ , num_attention_heads=__magic_name__ , )
return config
@torch.no_grad()
def a_(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    """Convert a fairseq MusicGen checkpoint into a HF MusicgenForConditionalGeneration.

    Args:
        checkpoint: checkpoint size ("small", "medium" or "large").
        pytorch_dump_folder: optional local directory to save the converted model to.
        repo_id: optional Hub repo id to push the converted model/processor to.
        device: torch device used to load the fairseq model.

    Raises:
        ValueError: if converted weights are missing/unexpected, or the sanity
            forward pass produces logits of the wrong shape.
    """
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = TaEncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(F"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(F"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2_048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2_048
    model.generation_config.pad_token_id = 2_048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(F"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(F"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)


# Alias matching the name used in the CLI entry point below.
convert_musicgen_checkpoint = a_
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint",
        default="small",
        type=str,
        help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
    )
    parser.add_argument(
        "--pytorch_dump_folder",
        required=True,
        default=None,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    parser.add_argument(
        "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
    )
    args = parser.parse_args()
    # Forward the requested device too — it was previously parsed but never used.
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device)
| 84 |
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def a_(name):
    """Translate a single ViT-MAE checkpoint key to its HF Transformers name.

    Applies a chain of substring replacements; each replacement feeds the next,
    so order matters (e.g. "decoder_blocks" must be handled before "blocks").
    """
    if "cls_token" in name:
        name = name.replace("cls_token", "vit.embeddings.cls_token")
    if "mask_token" in name:
        name = name.replace("mask_token", "decoder.mask_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "vit.embeddings.norm")
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight", "vit.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias", "vit.layernorm.bias")
    return name


# Alias matching the name used by the state-dict conversion below.
rename_key = a_
def a_(orig_state_dict, config):
    """Convert a ViT-MAE checkpoint state dict to the HF Transformers layout.

    Fused qkv weights/biases are split into separate query/key/value tensors;
    all other keys are renamed via the key-renaming helper defined above.

    Args:
        orig_state_dict: checkpoint state dict (mutated in place).
        config: model config providing `hidden_size` and `decoder_hidden_size`.

    Returns:
        The converted state dict (same object as `orig_state_dict`).
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
            if "weight" in key:
                orig_state_dict[F"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[F"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[F"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[F"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[F"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[F"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            # NOTE(review): relies on the key-renaming helper defined earlier
            # in this file (originally named `rename_key`).
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict


# Alias matching the name used by the checkpoint conversion entry point below.
convert_state_dict = a_
def a_(checkpoint_url, pytorch_dump_folder_path):
    """Download a ViT-MAE checkpoint, convert it to HF format, verify and save it.

    Args:
        checkpoint_url: URL of the original MAE checkpoint ("large"/"huge" in
            the URL selects the matching config; anything else uses the base config).
        pytorch_dump_folder_path: directory where model and processor are saved.
    """
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1_280
        config.intermediate_size = 5_120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # a single processor is enough; the original built it twice
    image_processor = ViTMAEImageProcessor(size=config.image_size)

    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass (seeded: ViT-MAE masks patches randomly)
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]]
        )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]]
        )
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]]
        )

    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(F"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(F"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


# Alias matching the name used in the CLI entry point below.
convert_vit_mae_checkpoint = a_
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 84 | 1 |
from collections.abc import Callable
import numpy as np
def a_(ode_func: Callable, ya: float, xa: float, step_size: float, x_end: float) -> np.ndarray:
    """Approximate the solution of y' = f(x, y) with the explicit Euler method.

    Args:
        ode_func: right-hand side f(x, y) of the ODE.
        ya: initial value y(xa).
        xa: initial x value.
        step_size: fixed step size h (> 0).
        x_end: final x value up to which to integrate.

    Returns:
        Array of the successive y-approximations, with y[0] == ya.
    """
    n = int(np.ceil((x_end - xa) / step_size))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        # y_{k+1} = y_k + h * f(x_k, y_k)
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size
    return y
# Run the module's doctests when this file is executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 84 |
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# Batch-size constants for this example. NOTE(review): the names were mangled
# by obfuscation — presumably MAX_GPU_BATCH_SIZE (16) and EVAL_BATCH_SIZE (32);
# confirm before relying on them.
_a : Optional[Any] = 16
_a : Union[str, Any] = 32
def a_(accelerator, batch_size=16):
    """Build the GLUE MRPC train/eval dataloaders for the Accelerate example.

    Args:
        accelerator: the `Accelerator` driving the run (used for process-first
            dataset mapping and padding decisions).
        batch_size: per-device batch size used for both loaders.

    Returns:
        Tuple of (train_dataloader, eval_dataloader).
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    # NOTE(review): eval reuses the same batch size; upstream examples sometimes
    # use a separate EVAL_BATCH_SIZE constant — confirm if that was intended.
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader


# Alias matching the name used inside the training loop below.
get_dataloaders = a_
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    # Rebind the dataloader builder so tests use small mocked datasets instead
    # of downloading GLUE. The previous assignment bound the mock to a throwaway
    # name, leaving the real (network-hitting) builder in use.
    get_dataloaders = mocked_dataloaders  # noqa: F811
def a_(config, args):
    """Fine-tune bert-base-cased on GLUE MRPC with automatic batch-size finding.

    Args:
        config: dict with "lr", "num_epochs", "seed" and "batch_size".
        args: parsed CLI args providing `cpu` and `mixed_precision`.
    """
    # For testing only: keep runs short when the mocked dataloaders are active.
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()
        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the
        # optimizer creation, otherwise training will not work on TPU.
        model = model.to(accelerator.device)
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )
        # Prepare everything: unpack in the same order the objects were passed in.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )
        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for batch in train_dataloader:
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            model.eval()
            for batch in eval_dataloader:
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(predictions=predictions, references=references)
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(F"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()


# Alias matching the name used in `main` below.
training_function = a_
def a_():
    """Parse CLI arguments and launch `training_function` with default hyper-parameters."""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


# Alias so the entry-point guard below finds the function by name.
main = a_


if __name__ == "__main__":
    main()
| 84 | 1 |
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
# Generic key/value type variables shared by the node, list and cache classes below.
# (They were previously bound to a throwaway name, leaving T/U undefined.)
T = TypeVar("T")
U = TypeVar("U")
class a_(Generic[T, U]):
    """Node of a doubly linked list, holding one key/value pair."""

    def __init__(self, key: T | None, val: U | None) -> None:
        self.key = key
        self.val = val
        # links are wired up by the containing list
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self) -> str:
        return (
            F"Node: key: {self.key}, val: {self.val}, "
            F"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )


# Conventional name for this class (used by the list and cache below).
DoubleLinkedListNode = a_
class a_(Generic[T, U]):
    """Doubly linked list with sentinel head/rear nodes, used by the LRU cache.

    NOTE(review): expects the node class defined above to be available under
    the name `DoubleLinkedListNode`.
    """

    def __init__(self) -> None:
        # Sentinel nodes: real entries always live between head and rear.
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        """Insert `node` just before the rear sentinel (most-recently-used end)."""
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        """Unlink `node` and return it; return None if it is not linked (a sentinel)."""
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node


# Conventional name for this class (used by the cache below).
DoubleLinkedList = a_
class a_(Generic[T, U]):
    """Least-recently-used (LRU) cache backed by a doubly linked list and a dict.

    NOTE(review): expects `DoubleLinkedList` / `DoubleLinkedListNode` (defined
    above) to be available under those names.
    """

    # Shared map from decorated function -> its cache instance.
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int) -> None:
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self) -> str:
        return (
            F"CacheInfo(hits={self.hits}, misses={self.miss}, "
            F"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        """Return the cached value for `key` (marking it most recent), else None."""
        # Note: pythonic interface would throw KeyError rather than return None
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        """Insert or update `key`, evicting the least recently used entry if full."""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size: int = 128):
        """Return a decorator memoizing a single-argument function in an LRU cache."""

        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    # use cls(...) so no module-level class name is required
                    cls.decorator_function_to_instance_map[func] = cls(size)
                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner


# Conventional name for this class (referenced in the annotations above).
LRUCache = a_
# Run the module's doctests when this file is executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 84 |
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
# Module-level logger (bound to a mangled name by obfuscation; presumably
# intended to be `logger` — TODO confirm against the rest of the file).
_a : Dict = logging.get_logger(__name__)
def a_(box, width, height) -> list:
    """Normalize a pixel-space bounding box to the 0-1000 range used by LayoutLM.

    Args:
        box: (left, top, right, bottom) coordinates in pixels.
        width: image width in pixels.
        height: image height in pixels.

    Returns:
        The box with x-coordinates scaled by 1000/width and y-coordinates by
        1000/height, truncated to ints.
    """
    return [
        int(1_000 * (box[0] / width)),
        int(1_000 * (box[1] / height)),
        int(1_000 * (box[2] / width)),
        int(1_000 * (box[3] / height)),
    ]


# Alias matching the name used by the OCR helper below.
normalize_box = a_
def a_(image, lang, tesseract_config=None):
    """Run Tesseract OCR on an image and return its words with normalized boxes.

    Args:
        image: input image (anything `to_pil_image` accepts).
        lang: language hint passed to Tesseract (may be None).
        tesseract_config: extra Tesseract CLI config string; defaults to "".

    Returns:
        Tuple of (words, normalized_boxes), index-aligned, with boxes scaled
        to the 0-1000 range.
    """
    tesseract_config = tesseract_config if tesseract_config is not None else ""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates (set => O(1) membership)
    irrelevant_indices = {idx for idx, word in enumerate(words) if not word.strip()}
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_boxes.append([x, y, x + w, y + h])

    # finally, normalize the bounding boxes (helper defined above)
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes


# Alias matching the name used by the image processor below.
apply_tesseract = a_
class a_(BaseImageProcessor):
    """Image processor for LayoutLM-style models.

    Resizes images, flips RGB -> BGR (as Detectron2 expects), and optionally
    runs Tesseract OCR to extract words and normalized bounding boxes.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize `image` to `size` ({"height": h, "width": w})."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        # the bare `resize` below resolves to the module-level helper, not this method
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Preprocess one or more images; per-call arguments override the defaults set in __init__."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                # relies on the OCR helper defined above (apply_tesseract)
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
| 84 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazy-import structure: maps submodule name -> list of public names it provides.
# (Entries were previously bound to a throwaway name, leaving `_import_structure`
# undefined at the _LazyModule call below.)
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the module is lazy.
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 84 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class a_ :
    """Builds ``ASTConfig`` objects and random dummy inputs for the Audio
    Spectrogram Transformer (AST) tests below.

    NOTE(review): identifiers in this file look machine-renamed — the
    ``__init__`` parameters are all ``UpperCAmelCase__`` (duplicate names),
    locals are all written to ``snake_case`` while later code reads
    ``self.num_mel_bins`` etc. that were never assigned, and four methods
    share the name ``lowerCAmelCase`` so only the last definition is
    reachable on an instance. The test class below refers to this helper as
    ``ASTModelTester``; confirm against the original test file before use.
    """

    def __init__( self : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any]=13 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : List[Any]=24 , UpperCAmelCase__ : Union[str, Any]=16 , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : str=True , UpperCAmelCase__ : int=32 , UpperCAmelCase__ : Tuple=5 , UpperCAmelCase__ : Dict=4 , UpperCAmelCase__ : Optional[int]=37 , UpperCAmelCase__ : Optional[int]="gelu" , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Optional[int]=10 , UpperCAmelCase__ : Any=0.02 , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : List[Any]=2 , UpperCAmelCase__ : Optional[Any]=2 , ):
        """Record test hyperparameters and derive the expected sequence length."""
        snake_case : Tuple = parent
        snake_case : Dict = batch_size
        snake_case : str = patch_size
        snake_case : Union[str, Any] = max_length
        snake_case : str = num_mel_bins
        snake_case : Any = is_training
        snake_case : Union[str, Any] = use_labels
        snake_case : Tuple = hidden_size
        snake_case : Dict = num_hidden_layers
        snake_case : Any = num_attention_heads
        snake_case : Any = intermediate_size
        snake_case : List[Any] = hidden_act
        snake_case : str = hidden_dropout_prob
        snake_case : str = attention_probs_dropout_prob
        snake_case : str = type_sequence_label_size
        snake_case : Optional[int] = initializer_range
        snake_case : str = scope
        snake_case : int = frequency_stride
        snake_case : Union[str, Any] = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        snake_case : Optional[int] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        snake_case : Any = (self.max_length - self.patch_size) // self.time_stride + 1
        snake_case : Union[str, Any] = frequency_out_dimension * time_out_dimension
        snake_case : Union[str, Any] = num_patches + 2

    def lowerCAmelCase( self : Union[str, Any] ):
        """Create random ``input_values`` (and labels when enabled) plus a config."""
        snake_case : Optional[int] = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
        snake_case : str = None
        if self.use_labels:
            snake_case : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        snake_case : List[str] = self.get_config()
        return config, input_values, labels

    def lowerCAmelCase( self : Any ):
        """Build an ``ASTConfig`` from the stored hyperparameters."""
        return ASTConfig(
            patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )

    def lowerCAmelCase( self : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int] ):
        """Run ``ASTModel`` in eval mode and shape-check the last hidden state."""
        snake_case : str = ASTModel(config=UpperCAmelCase__ )
        model.to(UpperCAmelCase__ )
        model.eval()
        snake_case : Any = model(UpperCAmelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def lowerCAmelCase( self : Optional[Any] ):
        """Repackage prepared config/inputs into the kwargs dict the common tests use."""
        snake_case : int = self.prepare_config_and_inputs()
        (
            (
                snake_case
            ) , (
                snake_case
            ) , (
                snake_case
            ) ,
        ) : int = config_and_inputs
        snake_case : Tuple = {'''input_values''': input_values}
        return config, inputs_dict
@require_torch
class a_ ( a , a , unittest.TestCase ):
    """Model-level test suite for AST, driven by the common tester mixins.

    NOTE(review): machine-renaming artifacts — the base classes are both
    ``a`` (undefined here), the class attributes are all named ``A__`` so in
    the class namespace only the last assignment survives, ``ASTModelTester``
    is referenced but the helper class above is named ``a_``, and several
    test methods share the name ``lowerCAmelCase`` (later definitions shadow
    earlier ones). Confirm against the original test file.
    """

    A__ : List[Any] = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    A__ : int = (
        {'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel}
        if is_torch_available()
        else {}
    )
    A__ : Optional[Any] = False
    A__ : Dict = False
    A__ : int = False
    A__ : Optional[int] = False

    def lowerCAmelCase( self : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int ):
        """Skip the audio-classification pipeline tests for this model."""
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False

    def lowerCAmelCase( self : Optional[Any] ):
        """Set up the model tester and the config tester."""
        snake_case : Optional[int] = ASTModelTester(self )
        snake_case : Optional[int] = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37 )

    def lowerCAmelCase( self : List[str] ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''AST does not use inputs_embeds''' )
    def lowerCAmelCase( self : Tuple ):
        """Intentionally skipped: AST has no ``inputs_embeds``."""
        pass

    def lowerCAmelCase( self : Dict ):
        """Input embeddings must be a Module; output embeddings absent or Linear."""
        snake_case , snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case : Optional[Any] = model_class(UpperCAmelCase__ )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            snake_case : Any = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(UpperCAmelCase__ , nn.Linear ) )

    def lowerCAmelCase( self : Dict ):
        """The first forward argument should be ``input_values``."""
        snake_case , snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case : Any = model_class(UpperCAmelCase__ )
            snake_case : Any = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            snake_case : str = [*signature.parameters.keys()]
            snake_case : List[str] = ['''input_values''']
            self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )

    def lowerCAmelCase( self : Dict ):
        """Exercise the plain ``ASTModel`` forward pass."""
        snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCAmelCase__ )

    @slow
    def lowerCAmelCase( self : List[str] ):
        """Smoke-test loading a pretrained checkpoint from the hub."""
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            snake_case : List[str] = ASTModel.from_pretrained(UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
def a_ ( ):
    """Download a sample FLAC clip from the Hub and decode it with torchaudio.

    Returns:
        The ``(audio, sampling_rate)`` pair produced by ``torchaudio.load``.

    Fixes vs. the original: the downloaded path was stored in a throwaway
    variable and an undefined name was passed to ``torchaudio.load``
    (NameError); the annotated tuple-unpacking line was a SyntaxError; the
    ``-> Dict`` annotation referenced an unimported name and was wrong (a
    tuple is returned), so it is dropped.
    """
    filepath = hf_hub_download(
        repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' )
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate
@require_torch
@require_torchaudio
class a_ ( unittest.TestCase ):
    """Integration test: run the audioset-finetuned AST checkpoint on a real
    audio clip and compare the first logits against recorded values.

    NOTE(review): renaming artifacts — ``self.default_feature_extractor`` and
    ``prepare_audio()`` are referenced but the corresponding definitions in
    this file carry other names (``lowerCAmelCase`` / ``a_``). Confirm
    against the original test file.
    """

    @cached_property
    def lowerCAmelCase( self : Any ):
        """Feature extractor for the checkpoint, or None without torchaudio."""
        return (
            ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' )
            if is_torchaudio_available()
            else None
        )

    @slow
    def lowerCAmelCase( self : Tuple ):
        """End-to-end audio classification forward pass with logit checks."""
        snake_case : List[str] = self.default_feature_extractor
        snake_case : str = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(UpperCAmelCase__ )
        snake_case : str = self.default_feature_extractor
        snake_case , snake_case : int = prepare_audio()
        snake_case : Optional[int] = audio.squeeze().numpy()
        snake_case : Optional[Any] = feature_extractor(UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , return_tensors='''pt''' ).to(UpperCAmelCase__ )
        # forward pass
        with torch.no_grad():
            snake_case : Union[str, Any] = model(**UpperCAmelCase__ )
        # verify the logits
        snake_case : Any = torch.Size((1, 527) )
        self.assertEqual(outputs.logits.shape , UpperCAmelCase__ )
        snake_case : str = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(UpperCAmelCase__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4 ) )
| 84 | 1 |
def a_ ( __magic_name__ ) -> bool:
    """Return True when ``__magic_name__`` is a perfect cube of an integer.

    The original compared ``(n ** (1/3)) ** 3 == n`` with floats, which fails
    for most perfect cubes (e.g. ``27 ** (1/3)`` is ``3.0000000000000004``,
    so ``perfect_cube(27)`` returned False). We round the float cube root and
    verify the candidate exactly in integer arithmetic, checking the
    neighbouring integers to absorb any float drift on large inputs.
    Negative perfect cubes (e.g. -8 == (-2)**3) are also recognised via the
    absolute value; the original returned False for them.
    """
    n = abs(__magic_name__)
    root = round(n ** (1 / 3))
    # Exact integer check; +/-1 guards against the float root landing on the
    # wrong side of the true integer root.
    return any((root + d) ** 3 == n for d in (-1, 0, 1))
if __name__ == "__main__":
    # Fix: the original called `perfect_cube`, which is not defined in this
    # module — the perfect-cube predicate above is named `a_`.
    print(a_(27))
    print(a_(4))
| 84 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_a : Union[str, Any] = logging.getLogger(__name__)
def a_ ( preds , labels ) -> float:
    """Return the fraction of positions where ``preds`` equals ``labels``.

    Both arguments are numpy arrays of the same shape; equality is
    elementwise and the mean of the boolean mask is the accuracy.

    Fix vs. the original: both parameters were named ``__magic_name__`` (a
    SyntaxError — duplicate argument names) while the body read the undefined
    names ``preds``/``labels``; the parameters are restored to the names the
    body actually uses. The ``-> Optional[int]`` annotation was wrong (a
    float-like mean is returned) and is corrected.
    """
    return (preds == labels).mean()
@dataclass
class a_ :
    """CLI arguments selecting the pretrained model/config/tokenizer.

    NOTE(review): every field is named ``A__`` — in a class body later
    annotated assignments replace earlier ones, so only the final field
    survives as written. Presumably these were ``model_name_or_path``,
    ``config_name``, ``tokenizer_name`` and ``cache_dir`` (see the
    ``metadata`` help strings); confirm against the original script. The
    defaults reference ``a``, which is not defined here.
    """

    A__ : str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    A__ : Optional[str] = field(
        default=a , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    A__ : Optional[str] = field(
        default=a , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    A__ : Optional[str] = field(
        default=a , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class a_ :
    """CLI arguments describing the multiple-choice task and its data.

    NOTE(review): all fields are named ``A__`` (later annotations shadow
    earlier ones) — presumably ``task_name``, ``data_dir``,
    ``max_seq_length`` and ``overwrite_cache`` per the help strings; confirm
    against the original script. ``default=a`` references an undefined name.
    """

    A__ : str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys() )} )
    A__ : str = field(metadata={'help': 'Should contain the data files for the task.'} )
    A__ : int = field(
        default=128 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    A__ : bool = field(
        default=a , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def a_ ( ) -> Dict:
    """Train and/or evaluate a multiple-choice model from CLI arguments.

    Intended flow: parse args -> configure logging -> seed -> resolve the
    task processor and label set -> load config/tokenizer/model -> build
    train/eval datasets -> run Trainer -> optionally evaluate and write
    ``eval_results.txt``. Returns the dict of evaluation results.

    NOTE(review): machine-renaming damage throughout — locals are written to
    ``snake_case`` but read under their original names (``parser``,
    ``training_args``, ``processor`` ...), ``logger`` is bound as ``_a`` at
    module level, ``results`` is never bound before ``results.update``, and
    the three-target annotated unpack below is a SyntaxError. The body
    documents the intended algorithm; it does not run as written.
    """
    snake_case : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    snake_case , snake_case , snake_case : Tuple = parser.parse_args_into_dataclasses()
    # Refuse to clobber an existing non-empty output directory unless asked to.
    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            ''' --overwrite_output_dir to overcome.''' )
    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''' , __magic_name__ )
    # Set seed
    set_seed(training_args.seed )
    # Resolve the task processor and its label list.
    try:
        snake_case : int = processors[data_args.task_name]()
        snake_case : List[str] = processor.get_labels()
        snake_case : str = len(__magic_name__ )
    except KeyError:
        raise ValueError('''Task not found: %s''' % (data_args.task_name) )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    snake_case : List[Any] = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__magic_name__ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    snake_case : str = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    snake_case : Any = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__magic_name__ , cache_dir=model_args.cache_dir , )
    # Get datasets
    snake_case : Optional[int] = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=__magic_name__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    snake_case : Any = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=__magic_name__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )

    def compute_metrics(__magic_name__ ) -> Dict:
        # Accuracy over the argmax of the prediction logits.
        snake_case : str = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(__magic_name__ , p.label_ids )}

    # Data collator
    snake_case : Dict = DataCollatorWithPadding(__magic_name__ , pad_to_multiple_of=8 ) if training_args.fpaa else None
    # Initialize our Trainer
    snake_case : List[Any] = Trainer(
        model=__magic_name__ , args=__magic_name__ , train_dataset=__magic_name__ , eval_dataset=__magic_name__ , compute_metrics=__magic_name__ , data_collator=__magic_name__ , )
    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    snake_case : Optional[int] = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        snake_case : Optional[Any] = trainer.evaluate()
        snake_case : Union[str, Any] = os.path.join(training_args.output_dir , '''eval_results.txt''' )
        if trainer.is_world_master():
            with open(__magic_name__ , '''w''' ) as writer:
                logger.info('''***** Eval results *****''' )
                for key, value in result.items():
                    logger.info(''' %s = %s''' , __magic_name__ , __magic_name__ )
                    writer.write('''%s = %s\n''' % (key, value) )
            results.update(__magic_name__ )
    return results
def a_ ( __magic_name__ ) -> List[Any]:
    """Entry point for multiprocess spawning (the argument is the process
    index and is unused).

    NOTE(review): ``main`` is not defined in this module — the training entry
    point above was also renamed to ``a_`` and is shadowed by this
    definition, so calling this raises NameError. Presumably an artifact of
    automated renaming; verify against the original script's ``_mp_fn``.
    """
    main()
if __name__ == "__main__":
    # NOTE(review): `main` is undefined here — the training entry point above
    # was renamed to `a_` and then shadowed — so running this script raises
    # NameError. Confirm the intended entry point before fixing.
    main()
| 84 | 1 |
import os
import numpy
import onnx
def a_ ( a , b ):
    """Compare two protos (or any objects with a ``name`` attribute and value
    equality) while ignoring their ``name`` fields.

    Temporarily blanks both names, compares with ``==``, then restores the
    original names before returning.

    Fixes vs. the original: both parameters were named ``__magic_name__``
    (a SyntaxError — duplicate argument names) and every temporary was
    written to the same ``snake_case`` variable, leaving ``name_a``/``name_b``
    undefined (NameError). Parameters are restored to ``a``/``b``, the names
    the body itself reads; the wrong ``-> List[str]`` annotation (the result
    of ``==`` is returned) is dropped.
    """
    name_a = a.name
    name_b = b.name
    a.name = ''''''
    b.name = ''''''
    res = a == b
    # Restore the original names regardless of the comparison result.
    a.name = name_a
    b.name = name_b
    return res
def a_ ( __magic_name__ , __magic_name__ , __magic_name__ ) -> Dict:
    """Replace one input name of a single ONNX node, recursing into the
    subgraphs carried by ``If`` and ``Loop`` nodes.

    NOTE(review): heavily garbled by automated renaming — all three
    parameters share the name ``__magic_name__`` (a SyntaxError) and the body
    reads ``node_proto``/``name`` and calls ``_graph_replace_input_with``,
    none of which are defined here. Originally this was presumably
    ``_node_replace_input_with(node_proto, name, new_name)``; confirm before
    use.
    """
    for i, input_name in enumerate(node_proto.input ):
        if input_name == name:
            # Insert the replacement at position i, then drop the old entry.
            node_proto.input.insert(__magic_name__ , __magic_name__ )
            node_proto.input.pop(i + 1 )
    if node_proto.op_type == "If":
        # "If" nodes carry two subgraph attributes (then/else branches).
        _graph_replace_input_with(node_proto.attribute[0].g , __magic_name__ , __magic_name__ )
        _graph_replace_input_with(node_proto.attribute[1].g , __magic_name__ , __magic_name__ )
    if node_proto.op_type == "Loop":
        # "Loop" nodes carry a single body subgraph.
        _graph_replace_input_with(node_proto.attribute[0].g , __magic_name__ , __magic_name__ )
def a_ ( __magic_name__ , __magic_name__ , __magic_name__ ) -> Union[str, Any]:
    """Apply the node-level input rename to every node of a graph.

    NOTE(review): garbled — duplicate parameter names (SyntaxError); the body
    reads ``graph_proto`` and calls ``_node_replace_input_with``, neither
    defined here. Originally presumably
    ``_graph_replace_input_with(graph_proto, name, new_name)``.
    """
    for n in graph_proto.node:
        _node_replace_input_with(__magic_name__ , __magic_name__ , __magic_name__ )
def a_ ( __magic_name__ , __magic_name__ , __magic_name__ ) -> Tuple:
    """Remove duplicated initializers from a model copy and rewire every
    consumer to reference the surviving (lower-index) duplicate.

    NOTE(review): garbled — duplicate parameter names (SyntaxError); the body
    reads ``model``/``model_without_ext``/``ind_to_replace`` and
    ``inits_with_data``/``inits``, which are never bound here. Originally
    presumably ``_remove_dup_initializers_from_model(model,
    model_without_ext, ind_to_replace)``.
    """
    snake_case : Any = list(model.graph.initializer )
    snake_case : Tuple = list(model_without_ext.graph.initializer )
    for i, ref_i in ind_to_replace:
        # Sanity-check that the two initializer lists are still aligned and
        # that we always keep the earlier occurrence.
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        snake_case : str = inits[i].name
        snake_case : Optional[int] = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i] )
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph , __magic_name__ , __magic_name__ )
def a_ ( __magic_name__ ) -> Optional[Any]:
    """Load an ONNX model, detect initializer tensors with identical contents,
    strip the duplicates, and save an ``optimized_``-prefixed copy next to
    the original file.

    NOTE(review): garbled by automated renaming — most temporaries are
    written to the same ``snake_case`` name while later lines read the
    originals (``model``, ``inits``, ``dup_set``, ``dup_map``,
    ``ind_to_replace``, ``total_reduced_size``, ``model_file_name``,
    ``new_model`` ...), so this does not run as written. The structure below
    documents the intended algorithm; confirm against the original script.
    """
    snake_case : str = os.path.dirname(__magic_name__ )
    snake_case : Dict = os.path.basename(__magic_name__ )
    snake_case : Optional[Any] = onnx.load(os.path.join(__magic_name__ , __magic_name__ ) )
    snake_case : Any = list(model.graph.initializer )
    snake_case : Dict = set()
    snake_case : Union[str, Any] = {}
    snake_case : Optional[Any] = []
    snake_case : Union[str, Any] = 0
    # Pairwise scan for initializers whose contents compare equal
    # (names ignored); each duplicate pair is recorded for later rewiring.
    for i in range(len(__magic_name__ ) ):
        if i in dup_set:
            continue
        for j in range(i + 1 , len(__magic_name__ ) ):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i] , inits[j] ):
                dup_set.add(__magic_name__ )
                dup_set.add(__magic_name__ )
                # Estimate the bytes saved: element count times element size
                # (4-byte types vs 8-byte types, keyed by the proto's
                # data_type code — presumably ONNX TensorProto codes; confirm).
                snake_case : Dict = inits[j].data_type
                snake_case : Union[str, Any] = numpy.prod(inits[j].dims )
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print('''unexpected data type: ''' , __magic_name__ )
                total_reduced_size += mem_size
                snake_case : Optional[int] = inits[i].name
                snake_case : Optional[Any] = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(__magic_name__ )
                else:
                    snake_case : str = [name_j]
                ind_to_replace.append((j, i) )
    print('''total reduced size: ''' , total_reduced_size / 1_024 / 1_024 / 1_024 , '''GB''' )
    snake_case : Any = sorted(__magic_name__ )
    _remove_dup_initializers_from_model(__magic_name__ , __magic_name__ , __magic_name__ )
    # Save the deduplicated model alongside the original.
    snake_case : List[Any] = '''optimized_''' + model_file_name
    snake_case : Optional[int] = os.path.join(__magic_name__ , __magic_name__ )
    onnx.save(__magic_name__ , __magic_name__ )
    return new_model
| 84 |
import re
def a_ ( __magic_name__ ) -> bool:
    """Return True if the given string is a valid Sri Lankan mobile number.

    Accepted shapes: prefix ``0``, ``94``, ``+94`` or ``0094``, then ``7``,
    one digit from {0,1,2,4,5,6,7,8}, an optional ``-`` or space separator,
    and seven further digits.

    Fix vs. the original: the compiled pattern was assigned to a throwaway
    variable and never used; the code then called
    ``re.search(phone, phone)`` — treating the phone number itself as the
    regex — which accidentally "matched" plain digit strings and raised
    ``re.error`` for inputs starting with ``+``.
    """
    pattern = re.compile(
        R'''^(?:0|94|\+94|0{2}94)''' R'''7(0|1|2|4|5|6|7|8)''' R'''(-| |)''' R'''\d{7}$''' )
    return bool(pattern.search(__magic_name__ ) )
if __name__ == "__main__":
    # Fixes: the original printed `is_sri_lankan_phone_number(phone)`, but
    # neither name exists in this module — the validator is `a_` and the
    # number variable is `_a`. The `: Any` annotation is also dropped: `Any`
    # is not imported, and module-level annotations are evaluated (NameError).
    _a = '0094702343221'
    print(a_(_a))
| 84 | 1 |
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class a_ :
    """Builds ``FunnelConfig`` objects, random dummy inputs, and shape-check
    helpers for the TF Funnel Transformer test suites below.

    NOTE(review): identifiers look machine-renamed — ``__init__`` takes many
    duplicate ``UpperCAmelCase__`` parameters, locals are all written to
    ``snake_case`` while later code reads ``self.*`` attributes that were
    never assigned, and every method shares the name ``lowerCAmelCase`` so
    only the last definition is reachable on an instance. The test classes
    below refer to this helper as ``TFFunnelModelTester``; confirm against
    the original test file before relying on any of it.
    """

    def __init__( self : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int]=13 , UpperCAmelCase__ : str=7 , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : List[Any]=99 , UpperCAmelCase__ : str=[1, 1, 2] , UpperCAmelCase__ : Tuple=1 , UpperCAmelCase__ : Tuple=32 , UpperCAmelCase__ : str=4 , UpperCAmelCase__ : Dict=8 , UpperCAmelCase__ : Any=37 , UpperCAmelCase__ : Dict="gelu_new" , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : str=0.0 , UpperCAmelCase__ : Tuple=512 , UpperCAmelCase__ : List[Any]=3 , UpperCAmelCase__ : int=0.02 , UpperCAmelCase__ : List[str]=3 , UpperCAmelCase__ : Union[str, Any]=4 , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : Optional[Any]=False , ):
        """Record test hyperparameters and derive expected layer counts."""
        snake_case : List[str] = parent
        snake_case : Union[str, Any] = batch_size
        snake_case : Tuple = seq_length
        snake_case : int = is_training
        snake_case : int = use_input_mask
        snake_case : Optional[Any] = use_token_type_ids
        snake_case : Dict = use_labels
        snake_case : Tuple = vocab_size
        snake_case : Optional[int] = block_sizes
        snake_case : Optional[Any] = num_decoder_layers
        snake_case : Union[str, Any] = d_model
        snake_case : Tuple = n_head
        snake_case : Optional[Any] = d_head
        snake_case : Optional[int] = d_inner
        snake_case : Optional[int] = hidden_act
        snake_case : Dict = hidden_dropout
        snake_case : List[str] = attention_dropout
        snake_case : Dict = activation_dropout
        snake_case : List[str] = max_position_embeddings
        snake_case : List[str] = type_vocab_size
        snake_case : List[str] = 2
        snake_case : Optional[Any] = num_labels
        snake_case : Tuple = num_choices
        snake_case : Dict = scope
        snake_case : int = initializer_std
        # Used in the tests to check the size of the first attention layer
        snake_case : Any = n_head
        # Used in the tests to check the size of the first hidden state
        snake_case : List[str] = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        snake_case : Tuple = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            snake_case : Any = self.num_hidden_layers + 2

    def lowerCAmelCase( self : Union[str, Any] ):
        """Create random ids/masks/labels and a ``FunnelConfig``."""
        snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        snake_case : Dict = None
        if self.use_input_mask:
            snake_case : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
        snake_case : int = None
        if self.use_token_type_ids:
            snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        snake_case : int = None
        snake_case : Optional[int] = None
        snake_case : int = None
        if self.use_labels:
            snake_case : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            snake_case : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            snake_case : Tuple = ids_tensor([self.batch_size] , self.num_choices )
        snake_case : str = FunnelConfig(
            vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def lowerCAmelCase( self : List[str] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[Any] , ):
        """Run ``TFFunnelModel`` with dict/list input formats and shape-check outputs."""
        snake_case : Optional[int] = TFFunnelModel(config=UpperCAmelCase__ )
        snake_case : List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        snake_case : Union[str, Any] = model(UpperCAmelCase__ )
        snake_case : Tuple = [input_ids, input_mask]
        snake_case : Dict = model(UpperCAmelCase__ )
        snake_case : List[Any] = model(UpperCAmelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
        snake_case : str = False
        snake_case : Union[str, Any] = TFFunnelModel(config=UpperCAmelCase__ )
        snake_case : Optional[Any] = model(UpperCAmelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
        snake_case : List[Any] = False
        snake_case : Dict = TFFunnelModel(config=UpperCAmelCase__ )
        snake_case : Tuple = model(UpperCAmelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )

    def lowerCAmelCase( self : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str , ):
        """Run ``TFFunnelBaseModel`` (encoder only) and shape-check outputs."""
        snake_case : int = TFFunnelBaseModel(config=UpperCAmelCase__ )
        snake_case : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        snake_case : Optional[int] = model(UpperCAmelCase__ )
        snake_case : Optional[int] = [input_ids, input_mask]
        snake_case : List[str] = model(UpperCAmelCase__ )
        snake_case : Tuple = model(UpperCAmelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
        snake_case : List[str] = False
        snake_case : Union[str, Any] = TFFunnelBaseModel(config=UpperCAmelCase__ )
        snake_case : Tuple = model(UpperCAmelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
        snake_case : Any = False
        snake_case : int = TFFunnelBaseModel(config=UpperCAmelCase__ )
        snake_case : int = model(UpperCAmelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )

    def lowerCAmelCase( self : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , ):
        """Shape-check the pretraining head's logits."""
        snake_case : Dict = TFFunnelForPreTraining(config=UpperCAmelCase__ )
        snake_case : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        snake_case : List[Any] = model(UpperCAmelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )

    def lowerCAmelCase( self : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , ):
        """Shape-check the masked-LM head's logits."""
        snake_case : List[str] = TFFunnelForMaskedLM(config=UpperCAmelCase__ )
        snake_case : Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        snake_case : Tuple = model(UpperCAmelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def lowerCAmelCase( self : Tuple , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , ):
        """Shape-check the sequence-classification head's logits."""
        snake_case : Any = self.num_labels
        snake_case : Dict = TFFunnelForSequenceClassification(config=UpperCAmelCase__ )
        snake_case : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        snake_case : List[str] = model(UpperCAmelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def lowerCAmelCase( self : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , ):
        """Tile inputs per choice and shape-check the multiple-choice logits."""
        snake_case : Dict = self.num_choices
        snake_case : Tuple = TFFunnelForMultipleChoice(config=UpperCAmelCase__ )
        snake_case : str = tf.tile(tf.expand_dims(UpperCAmelCase__ , 1 ) , (1, self.num_choices, 1) )
        snake_case : List[Any] = tf.tile(tf.expand_dims(UpperCAmelCase__ , 1 ) , (1, self.num_choices, 1) )
        snake_case : Dict = tf.tile(tf.expand_dims(UpperCAmelCase__ , 1 ) , (1, self.num_choices, 1) )
        snake_case : Union[str, Any] = {
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
            '''token_type_ids''': multiple_choice_token_type_ids,
        }
        snake_case : Dict = model(UpperCAmelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def lowerCAmelCase( self : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] , ):
        """Shape-check the token-classification head's logits."""
        snake_case : str = self.num_labels
        snake_case : str = TFFunnelForTokenClassification(config=UpperCAmelCase__ )
        snake_case : List[str] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        snake_case : Dict = model(UpperCAmelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def lowerCAmelCase( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] , ):
        """Shape-check the QA head's start/end logits."""
        snake_case : Optional[int] = TFFunnelForQuestionAnswering(config=UpperCAmelCase__ )
        snake_case : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        snake_case : List[Any] = model(UpperCAmelCase__ )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def lowerCAmelCase( self : Optional[Any] ):
        """Repackage prepared config/inputs into the kwargs dict the common tests use."""
        snake_case : List[str] = self.prepare_config_and_inputs()
        (
            (
                snake_case
            ) , (
                snake_case
            ) , (
                snake_case
            ) , (
                snake_case
            ) , (
                snake_case
            ) , (
                snake_case
            ) , (
                snake_case
            ) ,
        ) : List[Any] = config_and_inputs
        snake_case : int = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class a_ ( a , a , unittest.TestCase ):
    """Test suite for the full (encoder+decoder) TF Funnel model variants.

    NOTE(review): renaming artifacts — base classes are both ``a``
    (undefined), class attributes all named ``A__`` (later assignments
    shadow earlier ones), ``TFFunnelModelTester`` refers to the helper class
    above (named ``a_`` here), and the test methods all share the name
    ``lowerCAmelCase`` so only the last is collected. Confirm against the
    original ``test_modeling_tf_funnel.py``.
    """

    A__ : str = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    A__ : List[Any] = (
        {
            'feature-extraction': (TFFunnelBaseModel, TFFunnelModel),
            'fill-mask': TFFunnelForMaskedLM,
            'question-answering': TFFunnelForQuestionAnswering,
            'text-classification': TFFunnelForSequenceClassification,
            'token-classification': TFFunnelForTokenClassification,
            'zero-shot': TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    A__ : List[Any] = False
    A__ : List[str] = False

    def lowerCAmelCase( self : List[Any] ):
        """Set up the model tester and the config tester."""
        snake_case : Optional[int] = TFFunnelModelTester(self )
        snake_case : Union[str, Any] = ConfigTester(self , config_class=UpperCAmelCase__ )

    def lowerCAmelCase( self : List[Any] ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def lowerCAmelCase( self : List[Any] ):
        """Exercise the plain model forward pass."""
        snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCAmelCase__ )

    def lowerCAmelCase( self : Any ):
        """Exercise the pretraining head."""
        snake_case : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase__ )

    def lowerCAmelCase( self : str ):
        """Exercise the masked-LM head."""
        snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ )

    def lowerCAmelCase( self : Any ):
        """Exercise the token-classification head."""
        snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase__ )

    def lowerCAmelCase( self : int ):
        """Exercise the question-answering head."""
        snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__ )
@require_tf
class a_ ( a , unittest.TestCase ):
    """Common TF tests for the Funnel *base* (no-decoder) model variants."""

    A__ : str = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    A__ : Optional[int] = False
    A__ : Dict = False

    # NOTE(review): methods were all named ``lowerCAmelCase`` (shadowing each
    # other, never discovered by unittest); restored the conventional names.
    def setUp( self : List[Any] ):
        """Bind the base-model tester and the shared config tester."""
        # ``base=True`` selects the base-model tester configuration; the original
        # passed an undefined name and dropped both helpers into locals.
        self.model_tester = TFFunnelModelTester(self , base=True )
        # FunnelConfig is the config class for these models — TODO confirm import.
        self.config_tester = ConfigTester(self , config_class=FunnelConfig )

    def test_config( self : List[Any] ):
        """Run the generic configuration checks."""
        self.config_tester.run_common_tests()

    def test_base_model( self : Tuple ):
        """Forward pass of the bare base model."""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs )

    def test_for_sequence_classification( self : Any ):
        """Forward pass of the sequence-classification head."""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )

    def test_for_multiple_choice( self : str ):
        """Forward pass of the multiple-choice head."""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
| 84 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class a_ ( unittest.TestCase ):
    """Fixture holder for the LayoutLMv3 image-processor tests.

    Stores the tensor dimensions used to generate fake inputs and the kwargs
    with which the test class instantiates the processor under test.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        # NOTE(review): the original declared every parameter under the same name
        # (a SyntaxError) and bound each value to a throw-away local, so none of
        # the instance attributes read by the test class were ever set.
        size = size if size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict( self : int ):
        """Kwargs with which the tests instantiate the image processor."""
        # Renamed from ``lowerCAmelCase`` to match the name the test class's
        # ``image_processor_dict`` property already calls.
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class a_ ( a , unittest.TestCase ):
    """Tests for ``LayoutLMvaImageProcessor`` (resizing + optional Tesseract OCR)."""

    # Class under test; ``None`` makes the mixin skip when pytesseract is absent.
    A__ : List[Any] = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    # NOTE(review): methods were all named ``lowerCAmelCase`` (shadowing each
    # other, never discovered by unittest) and every assignment landed in a
    # throw-away local while later lines read the conventional names; restored
    # the canonical names throughout.
    def setUp( self : Dict ):
        """Attach the fixture helper supplying processor kwargs and input sizes."""
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self )

    @property
    def image_processor_dict( self : Dict ):
        """Kwargs used to instantiate the processor in each test."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties( self : List[Any] ):
        """The processor exposes its three configuration attributes."""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing , '''size''' ) )
        self.assertTrue(hasattr(image_processing , '''apply_ocr''' ) )

    def test_image_processor_from_dict_with_kwargs( self : Optional[int] ):
        """``from_dict`` honours the stored size and an explicit ``size=`` override."""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )

    def test_batch_feature( self : str ):
        """Batch-feature path is not applicable to this processor."""
        pass

    def test_call_pil( self : Dict ):
        """PIL inputs: single image and batch yield correctly shaped tensors and,
        with OCR enabled, per-image word/box lists."""
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoding = image_processing(image_inputs[0] , return_tensors='''pt''' )
        self.assertEqual(
            encoding.pixel_values.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        self.assertIsInstance(encoding.words , list )
        self.assertIsInstance(encoding.boxes , list )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )

    def test_call_numpy( self : Union[str, Any] ):
        """NumPy inputs: single image and batch yield correctly shaped tensors."""
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )

    def test_call_pytorch( self : Optional[Any] ):
        """PyTorch inputs: single image and batch yield correctly shaped tensors."""
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )

    def test_LayoutLMv3_integration_test( self : Optional[Any] ):
        """End-to-end OCR check against a fixture document scan."""
        # with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()

        from datasets import load_dataset

        ds = load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' )
        image = Image.open(ds[0]['''file'''] ).convert('''RGB''' )
        encoding = image_processing(image , return_tensors='''pt''' )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
        self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        expected_words = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', 
        '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
        expected_boxes = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 
        447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
        # fmt: on
        self.assertListEqual(encoding.words , expected_words )
        self.assertListEqual(encoding.boxes , expected_boxes )
        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False )
        encoding = image_processing(image , return_tensors='''pt''' )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 84 | 1 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class a_ ( a ):
    """Unit tests for ``RealmRetriever`` block retrieval and answer-span search."""

    # NOTE(review): every method was named ``lowerCAmelCase`` even though the
    # bodies call ``self.get_config()`` / ``self.get_dummy_retriever()`` etc.;
    # restored the names the in-class callers expect.
    def setUp( self : List[Any] ):
        """Create a temp dir holding a tiny WordPiece vocab and a block-records dir."""
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''test''',
            '''question''',
            '''this''',
            '''is''',
            '''the''',
            '''first''',
            '''second''',
            '''third''',
            '''fourth''',
            '''fifth''',
            '''record''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        realm_tokenizer_path = os.path.join(self.tmpdirname , '''realm_tokenizer''' )
        os.makedirs(realm_tokenizer_path , exist_ok=True )
        self.vocab_file = os.path.join(realm_tokenizer_path , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )

        realm_block_records_path = os.path.join(self.tmpdirname , '''realm_block_records''' )
        os.makedirs(realm_block_records_path , exist_ok=True )

    def get_tokenizer( self : List[Any] ):
        """Tokenizer built from the vocab written in ``setUp``."""
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer''' ) )

    def tearDown( self : Any ):
        """Remove the temp dir created in ``setUp``."""
        shutil.rmtree(self.tmpdirname )

    def get_config( self : Optional[int] ):
        """Minimal Realm config covering only the dummy block records."""
        config = RealmConfig(num_block_records=self.num_block_records )
        return config

    def get_dummy_dataset( self : int ):
        """Tiny QA dataset fixture (unused by the retriever itself)."""
        dataset = Dataset.from_dict(
            {
                '''id''': ['''0''', '''1'''],
                '''question''': ['''foo''', '''bar'''],
                '''answers''': [['''Foo''', '''Bar'''], ['''Bar''']],
            } )
        return dataset

    def get_dummy_block_records( self : str ):
        """Six byte-string evidence blocks; one is longer than the reader window."""
        block_records = np.array(
            [
                b'''This is the first record''',
                b'''This is the second record''',
                b'''This is the third record''',
                b'''This is the fourth record''',
                b'''This is the fifth record''',
                b'''This is a longer longer longer record''',
            ] , dtype=object , )
        return block_records

    def get_dummy_retriever( self : Optional[Any] ):
        """Retriever wired to the dummy block records and the fixture tokenizer."""
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
        return retriever

    def test_retrieve( self : Optional[Any] ):
        """Retrieving two blocks yields correctly concatenated question+block inputs."""
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3] , dtype='''long''' )
        question_input_ids = tokenizer(['''Test question'''] ).input_ids
        answer_ids = tokenizer(
            ['''the fourth'''] , add_special_tokens=False , return_token_type_ids=False , return_attention_mask=False , ).input_ids
        max_length = config.reader_seq_len

        has_answers , start_pos , end_pos , concat_inputs = retriever(
            retrieved_block_ids , question_input_ids , answer_ids=answer_ids , max_length=max_length , return_tensors='''np''' )
        self.assertEqual(len(has_answers ) , 2 )
        self.assertEqual(len(start_pos ) , 2 )
        self.assertEqual(len(end_pos ) , 2 )
        self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
        self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
        self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
        self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , )

    def test_block_has_answer( self : Optional[Any] ):
        """Answer-span search marks which blocks contain an answer and where."""
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5] , dtype='''long''' )
        question_input_ids = tokenizer(['''Test question'''] ).input_ids
        answer_ids = tokenizer(
            ['''the fourth''', '''longer longer'''] , add_special_tokens=False , return_token_type_ids=False , return_attention_mask=False , ).input_ids
        max_length = config.reader_seq_len

        has_answers , start_pos , end_pos , concat_inputs = retriever(
            retrieved_block_ids , question_input_ids , answer_ids=answer_ids , max_length=max_length , return_tensors='''np''' )
        self.assertEqual([False, True, True] , has_answers )
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , start_pos )
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , end_pos )

    def test_save_load_pretrained( self : Optional[int] ):
        """Round-trip the block records through save_pretrained/from_pretrained."""
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
        self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )

        # Test mocked remote path
        with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''' ) as mock_hf_hub_download:
            # Redirect the hub download to the locally saved records file.
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname , '''realm_block_records''' ) , _REALM_BLOCK_RECORDS_FILENAME )
            retriever = RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''' )
            self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
| 84 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_a : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
_a : Dict = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... 
prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    """Map a pixel height/width to the nearest latent-compatible size.

    The latent grid is ``scale_factor**2`` times smaller than the image, so each
    dimension is divided by ``scale_factor**2`` (rounded up) and then multiplied
    back by ``scale_factor`` to obtain the latent height/width.

    Returns:
        tuple[int, int]: ``(new_height, new_width)``.
    """
    # NOTE(review): the original declared three parameters under one name (a
    # SyntaxError), bound the quotients to throw-away locals so new_height /
    # new_width were undefined, and was defined under a name its caller
    # (``downscale_height_and_width(...)`` in the pipeline) never used.
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class a_ ( a ):
    """Kandinsky 2.2 ControlNet text-to-image pipeline (unet + DDPM scheduler + MoVQ decoder)."""

    # NOTE(review): methods were all named ``lowerCAmelCase`` (shadowing each
    # other) and assignments landed in throw-away locals while later lines read
    # the conventional names (``latents``, ``batch_size``, ``noise_pred``, ...);
    # restored the canonical names throughout.
    def __init__( self : Optional[int] , unet: UNetaDConditionModel , scheduler: DDPMScheduler , movq: VQModel , ):
        """Register the denoising unet, the DDPM scheduler and the MoVQ decoder."""
        super().__init__()
        self.register_modules(
            unet=unet , scheduler=scheduler , movq=movq , )
        # Ratio between pixel space and MoVQ latent space along each axis.
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)

    def prepare_latents( self : int , shape , dtype , device , generator , latents , scheduler ):
        """Draw (or validate) the initial latents and scale them by the scheduler's init sigma."""
        if latents is None:
            latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        else:
            if latents.shape != shape:
                raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}" )
            latents = latents.to(device )

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload( self : Dict , gpu_id=0 ):
        """Offload unet and movq to CPU, moving each to GPU only while its forward runs."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('''Please install accelerate via `pip install accelerate`''' )

        device = torch.device(F"cuda:{gpu_id}" )

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )

    def enable_model_cpu_offload( self : List[Any] , gpu_id=0 ):
        """Offload whole sub-models to CPU between forwards (cheaper than sequential offload)."""
        if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )

        device = torch.device(F"cuda:{gpu_id}" )

        if self.device.type != "cpu":
            self.to('''cpu''' , silence_dtype_warnings=True )
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _ , hook = cpu_offload_with_hook(cpu_offloaded_model , device , prev_module_hook=hook )

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device( self : Union[str, Any] ):
        """Device the unet executes on, accounting for accelerate offload hooks."""
        if not hasattr(self.unet , '''_hf_hook''' ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module , '''_hf_hook''' )
                and hasattr(module._hf_hook , '''execution_device''' )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device

    @torch.no_grad()
    # ``_a`` is the module-level example docstring constant defined above.
    @replace_example_docstring(_a )
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """Generate images from prior image embeddings plus a ControlNet hint.

        Runs classifier-free-guided DDPM denoising in MoVQ latent space, then
        decodes the latents and converts them to the requested ``output_type``.
        """
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds , list ):
            image_embeds = torch.cat(image_embeds , dim=0 )
        if isinstance(negative_image_embeds , list ):
            negative_image_embeds = torch.cat(negative_image_embeds , dim=0 )
        if isinstance(hint , list ):
            hint = torch.cat(hint , dim=0 )

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            hint = hint.repeat_interleave(num_images_per_prompt , dim=0 )

            # Unconditional embeddings first, conditional second (split order below relies on this).
            image_embeds = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=device )
            hint = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=device )

        self.scheduler.set_timesteps(num_inference_steps , device=device )
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels

        height , width = downscale_height_and_width(height , width , self.movq_scale_factor )

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , device , generator , latents , self.scheduler , )

        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents

            added_cond_kwargs = {'''image_embeds''': image_embeds, '''hint''': hint}
            noise_pred = self.unet(
                sample=latent_model_input , timestep=t , encoder_hidden_states=None , added_cond_kwargs=added_cond_kwargs , return_dict=False , )[0]

            if do_classifier_free_guidance:
                # The unet predicts noise and (learned) variance concatenated on dim 1.
                noise_pred , variance_pred = noise_pred.split(latents.shape[1] , dim=1 )
                noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
                _ , variance_pred_text = variance_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text] , dim=1 )

            if not (
                hasattr(self.scheduler.config , '''variance_type''' )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred , _ = noise_pred.split(latents.shape[1] , dim=1 )

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred , t , latents , generator=generator , )[0]

        # post-processing
        image = self.movq.decode(latents , force_not_quantize=True )['''sample''']

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0 , 1 )
            image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image )
| 84 | 1 |
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    """Return True if *phone* is a valid Sri Lankan mobile number.

    Accepts prefixes ``0`` / ``94`` / ``+94`` / ``0094``, followed by ``7X``
    (X in 0-2 or 4-8), an optional space or hyphen, and seven digits.
    """
    # NOTE(review): the original compiled the pattern, then never used it and
    # instead searched the phone string against itself (which even raises on
    # the '+' metacharacter); it was also defined under a name the __main__
    # block below never referenced.
    pattern = re.compile(
        r'''^(?:0|94|\+94|0{2}94)''' r'''7(0|1|2|4|5|6|7|8)''' r'''(-| |)''' r'''\d{7}$''' )
    return bool(pattern.search(phone ) )


if __name__ == "__main__":
    phone = '0094702343221'
    print(is_sri_lankan_phone_number(phone))
| 84 |
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_a : List[Any] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class a_ ( TokenizerTesterMixin , unittest.TestCase ):
    """Tokenization tests for ReformerTokenizer / ReformerTokenizerFast.

    NOTE(review): restored from an obfuscated state in which the base class was
    the undefined name ``a``, all five class attributes were bound to the same
    name ``A__`` (so only the last survived), every method was named
    ``lowerCAmelCase`` (so only the last survived), and method bodies
    referenced the unbound placeholder ``UpperCAmelCase__``. Attribute and
    method names follow what ``TokenizerTesterMixin`` expects.
    """

    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        """Build a slow tokenizer from the fixture vocab and save it where the mixin looks."""
        super().setUp()
        tokenizer = ReformerTokenizer(_a, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """`<s>` maps to id 1 and back."""
        token = '''<s>'''
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '''<unk>''')
        self.assertEqual(vocab_keys[1], '''<s>''')
        self.assertEqual(vocab_keys[-1], '''j''')
        self.assertEqual(len(vocab_keys), 1_000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_000)

    def test_rust_and_python_full_tokenizers(self):
        """Slow and fast tokenizers must agree on tokens and ids."""
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = '''I was born in 92000, and this is falsé.'''
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_padding(self, max_length=15):
        """The tokenizer defines no padding token, so padding requests must raise."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = '''This is a simple input'''
                s2 = ['''This is a simple input 1''', '''This is a simple input 2''']
                p = ('''This is a simple input''', '''This is a pair''')
                p2 = [
                    ('''This is a simple input 1''', '''This is a simple input 2'''),
                    ('''This is a simple pair 1''', '''This is a simple pair 2'''),
                ]
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding='''max_length''')
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding='''max_length''')
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding='''max_length''',
                )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding='''max_length''')
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding='''max_length''')
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding='''max_length''',
                )

    def test_padding_different_model_input_name(self):
        # No padding token, so the common padding test does not apply.
        pass

    def test_full_tokenizer(self):
        tokenizer = ReformerTokenizer(_a, keep_accents=True)
        tokens = tokenizer.tokenize('''This is a test''')
        self.assertListEqual(tokens, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382],
        )
        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''9''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''é''',
                '''.''',
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )
        # Characters unknown to the vocab (9, é) must round-trip to <unk>.
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''<unk>''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''<unk>''',
                '''.''',
            ],
        )

    @cached_property
    def big_tokenizer(self):
        """Pretrained tokenizer used by the slow integration tests below."""
        return ReformerTokenizer.from_pretrained('''google/reformer-crime-and-punishment''')

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = '''Hello World!'''
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
            ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
        )
        # fmt: off
        original_tokenizer_encodings = [
            108, 265, 24, 111, 4, 258, 156, 35, 28, 275, 3, 259, 297, 260, 84, 4, 35, 110, 44, 8, 259, 91, 268,
            21, 11, 209, 274, 109, 266, 277, 117, 86, 93, 315, 258, 278, 258, 277, 258, 0, 258, 288, 258, 319,
            258, 0, 258, 0, 258, 0, 258, 0, 258, 287, 258, 315, 258, 289, 258, 278, 99, 269, 266, 262, 8, 259,
            241, 4, 217, 230, 268, 266, 55, 168, 106, 75, 193, 266, 223, 27, 49, 26, 282, 25, 264, 299, 19, 26,
            0, 258, 277, 117, 86, 93, 176, 183, 270, 11, 262, 42, 61, 265,
        ]
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import ReformerConfig, ReformerModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = ''' '''.join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors='''pt''')
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors='''pt''')
        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence['''input_ids'''].shape
        model = ReformerModel(config)
        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on
        # This tokenizer does not know some characters like ")".
        # That is the reason why we use very simple texts here.
        # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
            '''This is a very simple sentence.''',
            '''The quick brown fox jumps over the lazy dog.''',
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name='''google/reformer-crime-and-punishment''',
            revision='''0e6c3decb8211d49bf881013425dc8b0448b3f5a''',
            padding=False,
            sequences=sequences,
        )
| 84 | 1 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class a_ :
    """Builds tiny ESM configs and inputs, and runs shape checks on the models.

    NOTE(review): restored from an obfuscated state in which ``__init__`` and
    the three ``create_and_check_*`` methods repeated the same parameter name
    (a SyntaxError) and every method shared the name ``lowerCAmelCase``.
    Method names follow what the common-test class (which calls
    ``prepare_config_and_inputs`` etc. on ``self.model_tester``) expects.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        # `parent` is the unittest.TestCase that owns the assertions.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict


# Backward-compatible alias: the common-test class in this module refers to the
# tester by its conventional name.
EsmModelTester = a_
@require_torch
class a_ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common model tests for ESM.

    NOTE(review): restored from an obfuscated state in which the bases were the
    undefined name ``a``, all class attributes were bound to the same name
    ``A__``, all methods were named ``lowerCAmelCase`` (shadowing each other),
    and bodies referenced the unbound placeholder ``UpperCAmelCase__``.
    """

    test_mismatched_shapes = False
    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            'feature-extraction': EsmModel,
            'fill-mask': EsmForMaskedLM,
            'text-classification': EsmForSequenceClassification,
            'token-classification': EsmForTokenClassification,
            'zero-shot': EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True

    def setUp(self):
        """Wire up the shape-check helper and the config tester."""
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for embedding_type in ["absolute", "relative_key", "relative_key_query"]:
            # config is the first element of the prepared tuple
            config_and_inputs[0].position_embedding_type = embedding_type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_create_position_ids_respects_padding_index(self):
        """Padding positions must keep the padding index; others are offset past it."""
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)
        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)
        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    @unittest.skip('''Esm does not support embedding resizing''')
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip('''Esm does not support embedding resizing''')
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
    def test_model_is_small(self):
        pass
@require_torch
class a_ ( TestCasePlus ):
    """Integration tests against the pretrained facebook/esm2_t6_8M_UR50D checkpoint.

    NOTE(review): restored from an obfuscated state — the base class was the
    undefined name ``a``, both methods shared the name ``lowerCAmelCase`` and
    the bodies referenced the unbound placeholder ``UpperCAmelCase__``.
    """

    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''')
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]
            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)
            # Reference logits recorded from the released checkpoint.
            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''')
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 84 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def a_ ( __magic_name__ ) -> torch.Tensor:
    """Preprocess a PIL image into an NCHW float tensor scaled to [-1, 1].

    The image is resized down to the nearest multiple of 32 on each side
    (required by the UNet), converted to float32 in [0, 1], moved to
    channels-first layout with a leading batch axis, and rescaled to [-1, 1].

    Bugs fixed: the width/height were assigned to discarded placeholder names
    (leaving ``w``/``h`` unbound) and the dtype was the nonexistent
    ``np.floataa`` instead of ``np.float32``.
    """
    w, h = __magic_name__.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = __magic_name__.resize((w, h), resample=PIL_INTERPOLATION['''lanczos'''])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class a_ ( a ):
    """Latent-diffusion super-resolution pipeline (VQ-VAE decoder + UNet + scheduler).

    NOTE(review): restored from an obfuscated state in which ``__init__`` and
    ``__call__`` repeated the same parameter name (a SyntaxError), the bodies
    referenced unbound placeholders, and the preprocessing helper was called
    under its pre-rename name ``preprocess`` (it is ``a_`` in this module).
    """

    def __init__(
        self,
        vqvae: VQModel,
        unet: UNetaDModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        """Register the three sub-modules on the pipeline."""
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """Run the denoising loop conditioned on the low-resolution *image*.

        Returns an ``ImagePipelineOutput`` (or a one-tuple when
        ``return_dict=False``) containing the upscaled image(s).
        """
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(F"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = a_(image)  # module-level preprocess helper

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype
        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = '''eta''' in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs['''eta'''] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 84 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
# Lazy-import structure: submodule name -> public names it exports.
# Bugs fixed: this dict was previously bound to `_a` (and then overwritten by
# the modeling list below), while `_LazyModule` referenced an undefined
# `_import_structure`, making the module unimportable.
_import_structure = {
    'configuration_ernie': ['ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ErnieConfig', 'ErnieOnnxConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is available: also expose the modeling classes.
    _import_structure['modeling_ernie'] = [
        'ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ErnieForCausalLM',
        'ErnieForMaskedLM',
        'ErnieForMultipleChoice',
        'ErnieForNextSentencePrediction',
        'ErnieForPreTraining',
        'ErnieForQuestionAnswering',
        'ErnieForSequenceClassification',
        'ErnieForTokenClassification',
        'ErnieModel',
        'ErniePreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ernie import (
            ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ErnieForCausalLM,
            ErnieForMaskedLM,
            ErnieForMultipleChoice,
            ErnieForNextSentencePrediction,
            ErnieForPreTraining,
            ErnieForQuestionAnswering,
            ErnieForSequenceClassification,
            ErnieForTokenClassification,
            ErnieModel,
            ErniePreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy so submodules are only
    # imported on first attribute access.
    _a : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
    sys.modules[__name__] = _a
| 84 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class a_ ( TestCase ):
    """Unit tests for ``RealmRetriever`` with a tiny in-memory block-record store.

    NOTE(review): restored from an obfuscated state — the base class was the
    undefined name ``a``, every method shared the name ``lowerCAmelCase`` (so
    ``setUp`` never ran), and ``setUp`` assigned to discarded placeholders so
    ``self.tmpdirname`` / ``self.num_block_records`` / ``self.vocab_file``
    (read by the other methods) were never set. Names restored to what the
    bodies themselves reference.
    """

    def setUp(self):
        # Scratch directory that holds the tokenizer vocab and block records.
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''test''',
            '''question''',
            '''this''',
            '''is''',
            '''the''',
            '''first''',
            '''second''',
            '''third''',
            '''fourth''',
            '''fifth''',
            '''record''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        realm_tokenizer_path = os.path.join(self.tmpdirname, '''realm_tokenizer''')
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES['''vocab_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, '''realm_block_records''')
        os.makedirs(realm_block_records_path, exist_ok=True)

    def get_tokenizer(self):
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, '''realm_tokenizer'''))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                '''id''': ['''0''', '''1'''],
                '''question''': ['''foo''', '''bar'''],
                '''answers''': [['''Foo''', '''Bar'''], ['''Bar''']],
            }
        )
        return dataset

    def get_dummy_block_records(self):
        # dtype=object because the byte strings have different lengths.
        block_records = np.array(
            [
                b'''This is the first record''',
                b'''This is the second record''',
                b'''This is the third record''',
                b'''This is the fourth record''',
                b'''This is the fifth record''',
                b'''This is a longer longer longer record''',
            ],
            dtype=object,
        )
        return block_records

    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever

    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype='''long''')
        question_input_ids = tokenizer(['''Test question''']).input_ids
        answer_ids = tokenizer(
            ['''the fourth'''],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors='''np'''
        )
        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]),
            ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''],
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]),
            ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''],
        )

    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype='''long''')
        question_input_ids = tokenizer(['''Test question''']).input_ids
        answer_ids = tokenizer(
            ['''the fourth''', '''longer longer'''],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors='''np'''
        )
        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)

    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, '''realm_block_records'''))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, '''realm_block_records'''))
        self.assertEqual(retriever.block_records[0], b'''This is the first record''')

        # Test mocked remote path
        with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''') as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, '''realm_block_records'''), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''')
            self.assertEqual(retriever.block_records[0], b'''This is the first record''')
| 84 | 1 |
import torch
from torch import nn
class a_ ( nn.Module ):
def __init__( self : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int=1 , UpperCAmelCase__ : List[str]=False ):
"""simple docstring"""
super().__init__()
snake_case : Any = n_token
snake_case : str = d_embed
snake_case : Optional[int] = d_proj
snake_case : Tuple = cutoffs + [n_token]
snake_case : int = [0] + self.cutoffs
snake_case : Optional[int] = div_val
snake_case : str = self.cutoffs[0]
snake_case : Optional[Any] = len(self.cutoffs ) - 1
snake_case : Union[str, Any] = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
snake_case : Union[str, Any] = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
snake_case : Union[str, Any] = nn.Parameter(torch.zeros(self.n_clusters ) )
snake_case : Tuple = nn.ModuleList()
snake_case : List[Any] = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(UpperCAmelCase__ , UpperCAmelCase__ ) ) )
else:
self.out_projs.append(UpperCAmelCase__ )
self.out_layers.append(nn.Linear(UpperCAmelCase__ , UpperCAmelCase__ ) )
else:
for i in range(len(self.cutoffs ) ):
snake_case , snake_case : str = self.cutoff_ends[i], self.cutoff_ends[i + 1]
snake_case : Optional[Any] = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(UpperCAmelCase__ , UpperCAmelCase__ ) ) )
self.out_layers.append(nn.Linear(UpperCAmelCase__ , r_idx - l_idx ) )
snake_case : Optional[int] = keep_order
def lowerCAmelCase( self : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict ):
"""simple docstring"""
if proj is None:
snake_case : Dict = nn.functional.linear(UpperCAmelCase__ , UpperCAmelCase__ , bias=UpperCAmelCase__ )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
snake_case : Optional[Any] = nn.functional.linear(UpperCAmelCase__ , proj.t().contiguous() )
snake_case : Any = nn.functional.linear(UpperCAmelCase__ , UpperCAmelCase__ , bias=UpperCAmelCase__ )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def lowerCAmelCase( self : Tuple , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Any=False ):
"""simple docstring"""
if labels is not None:
# Shift so that tokens < n predict n
snake_case : Tuple = hidden[..., :-1, :].contiguous()
snake_case : Union[str, Any] = labels[..., 1:].contiguous()
snake_case : Any = hidden.view(-1 , hidden.size(-1 ) )
snake_case : Any = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('''Input and labels should have the same size in the batch dimension.''' )
else:
snake_case : int = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
snake_case : str = self._compute_logit(UpperCAmelCase__ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
snake_case : Tuple = labels != -100
snake_case : Optional[Any] = torch.zeros_like(UpperCAmelCase__ , dtype=hidden.dtype , device=hidden.device )
snake_case : Any = (
-nn.functional.log_softmax(UpperCAmelCase__ , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
snake_case : Any = nn.functional.log_softmax(UpperCAmelCase__ , dim=-1 )
else:
# construct weights and biases
snake_case , snake_case : Any = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
snake_case , snake_case : Optional[int] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
snake_case : List[Any] = self.out_layers[0].weight[l_idx:r_idx]
snake_case : str = self.out_layers[0].bias[l_idx:r_idx]
else:
snake_case : Union[str, Any] = self.out_layers[i].weight
snake_case : List[str] = self.out_layers[i].bias
if i == 0:
snake_case : List[str] = torch.cat([weight_i, self.cluster_weight] , dim=0 )
snake_case : int = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(UpperCAmelCase__ )
biases.append(UpperCAmelCase__ )
snake_case , snake_case , snake_case : Dict = weights[0], biases[0], self.out_projs[0]
snake_case : Dict = self._compute_logit(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
snake_case : Optional[int] = nn.functional.log_softmax(UpperCAmelCase__ , dim=1 )
if labels is None:
snake_case : List[str] = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
snake_case : Optional[Any] = torch.zeros_like(UpperCAmelCase__ , dtype=hidden.dtype , device=hidden.device )
snake_case : Optional[int] = 0
snake_case : Dict = [0] + self.cutoffs
for i in range(len(UpperCAmelCase__ ) - 1 ):
snake_case , snake_case : Optional[int] = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
snake_case : Optional[int] = (labels >= l_idx) & (labels < r_idx)
snake_case : Optional[Any] = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
snake_case : Any = labels.index_select(0 , UpperCAmelCase__ ) - l_idx
snake_case : str = head_logprob.index_select(0 , UpperCAmelCase__ )
snake_case : Dict = hidden.index_select(0 , UpperCAmelCase__ )
else:
snake_case : Dict = hidden
if i == 0:
if labels is not None:
snake_case : int = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
snake_case : str = head_logprob[:, : self.cutoffs[0]]
else:
snake_case , snake_case , snake_case : Tuple = weights[i], biases[i], self.out_projs[i]
snake_case : Dict = self._compute_logit(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
snake_case : Dict = nn.functional.log_softmax(UpperCAmelCase__ , dim=1 )
snake_case : Any = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
snake_case : Any = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
snake_case : Dict = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
snake_case : List[Any] = logprob_i
if labels is not None:
if (hasattr(self , '''keep_order''' ) and self.keep_order) or keep_order:
out.index_copy_(0 , UpperCAmelCase__ , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def lowerCAmelCase( self : Optional[int] , UpperCAmelCase__ : Any ):
"""simple docstring"""
if self.n_clusters == 0:
snake_case : Optional[Any] = self._compute_logit(UpperCAmelCase__ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(UpperCAmelCase__ , dim=-1 )
else:
# construct weights and biases
snake_case , snake_case : Optional[Any] = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
snake_case , snake_case : Optional[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
snake_case : List[str] = self.out_layers[0].weight[l_idx:r_idx]
snake_case : Optional[Any] = self.out_layers[0].bias[l_idx:r_idx]
else:
snake_case : Any = self.out_layers[i].weight
snake_case : Optional[Any] = self.out_layers[i].bias
if i == 0:
snake_case : Optional[int] = torch.cat([weight_i, self.cluster_weight] , dim=0 )
snake_case : Any = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(UpperCAmelCase__ )
biases.append(UpperCAmelCase__ )
snake_case , snake_case , snake_case : Optional[Any] = weights[0], biases[0], self.out_projs[0]
snake_case : int = self._compute_logit(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
snake_case : Optional[int] = hidden.new_empty((head_logit.size(0 ), self.n_token) )
snake_case : List[Any] = nn.functional.log_softmax(UpperCAmelCase__ , dim=1 )
snake_case : int = [0] + self.cutoffs
for i in range(len(UpperCAmelCase__ ) - 1 ):
snake_case , snake_case : Union[str, Any] = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
snake_case : int = head_logprob[:, : self.cutoffs[0]]
else:
snake_case , snake_case , snake_case : Dict = weights[i], biases[i], self.out_projs[i]
snake_case : Tuple = self._compute_logit(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
snake_case : List[Any] = nn.functional.log_softmax(UpperCAmelCase__ , dim=1 )
snake_case : Tuple = head_logprob[:, -i] + tail_logprob_i
snake_case : Any = logprob_i
return out
| 84 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_a : str = {'configuration_encoder_decoder': ['EncoderDecoderConfig']}

# NOTE(review): the original assigned each optional model list to the
# throwaway name `_a` (clobbering the dict) and then passed an undefined
# `_import_structure` to _LazyModule — a NameError on import. Build the
# import structure under the expected name instead.
_import_structure = _a

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_encoder_decoder'] = ['EncoderDecoderModel']

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_encoder_decoder'] = ['TFEncoderDecoderModel']

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_flax_encoder_decoder'] = ['FlaxEncoderDecoderModel']

if TYPE_CHECKING:
    from .configuration_encoder_decoder import EncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encoder_decoder import EncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_encoder_decoder import TFEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 84 | 1 |
from collections.abc import Callable
import numpy as np
def a_(
    ode_func: Callable,
    y0: float,
    x0: float,
    step_size: float,
    x_end: float,
) -> np.array:
    """Integrate y' = ode_func(x, y), y(x0) = y0 with Euler's modified (Heun) method.

    NOTE(review): the original declared all five parameters with the same
    name `__magic_name__` (a SyntaxError); the real names are recovered from
    the body (`ode_func`, `ya`, `xa`, `step_size`, `x_end`).

    Args:
        ode_func: right-hand side f(x, y) of the ODE.
        y0: initial value y(x0).
        x0: initial abscissa.
        step_size: integration step h.
        x_end: end of the integration interval.

    Returns:
        Array of approximated y-values at x0, x0+h, ..., covering [x0, x_end].
    """
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        # Predictor: plain forward-Euler estimate at x + h.
        y_pred = y[k] + step_size * ode_func(x, y[k])
        # Corrector: trapezoidal average of the slopes at both endpoints.
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_pred))
        )
        x += step_size
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 84 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
# Module-level logger for this tokenizer module.
_a : str = logging.get_logger(__name__)

# Canonical file names for the slow-vocab / fast-tokenizer artifacts.
# NOTE(review): obfuscation rebinds `_a` on every assignment, so only the
# last value survives, and the class below reads VOCAB_FILES_NAMES /
# PRETRAINED_* constants that are no longer defined under those names —
# verify against the original module.
_a : Optional[int] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

# Hub URLs of the vocab/tokenizer files for each pretrained checkpoint.
_a : Optional[Any] = {
    'vocab_file': {
        'yjernite/retribert-base-uncased': (
            'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
        ),
    },
    'tokenizer_file': {
        'yjernite/retribert-base-uncased': (
            'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
        ),
    },
}

# Maximum input length (positional-embedding size) per checkpoint.
_a : Union[str, Any] = {
    'yjernite/retribert-base-uncased': 512,
}

# Default init kwargs per checkpoint.
_a : Tuple = {
    'yjernite/retribert-base-uncased': {'do_lower_case': True},
}
class a_ ( a ):
    r"""Fast RetriBERT tokenizer (backed by HuggingFace *tokenizers*), WordPiece-based.

    NOTE(review): in the obfuscated original every class attribute was named
    `A__` (so later ones clobbered earlier ones), the three methods shared a
    single name, and `__init__` declared eleven parameters all called
    `UpperCAmelCase__` — a SyntaxError. The canonical attribute, parameter and
    method names are restored here so the class can compile and function.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Re-configure the backend normalizer if the saved state disagrees
        # with the options requested at construction time.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('''lowercase''', do_lower_case) != do_lower_case
            or normalizer_state.get('''strip_accents''', strip_accents) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('''type'''))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_pair=None):
        """Build model input as ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a_pair:
            output += token_ids_a_pair + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_a_pair: Optional[List[int]] = None
    ):
        """Return 0s for the first segment (incl. specials) and 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_pair is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a_pair + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """Persist the backend model's vocabulary files; return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 84 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_a : List[str] = {
    'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'],
    'tokenization_canine': ['CanineTokenizer'],
}

# NOTE(review): `_LazyModule` below was handed an undefined
# `_import_structure` (NameError on import) while the torch-only model list
# was assigned to the clobbered name `_a`. Build the structure under the
# expected name.
_import_structure = _a

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_canine'] = [
        'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST',
        'CanineForMultipleChoice',
        'CanineForQuestionAnswering',
        'CanineForSequenceClassification',
        'CanineForTokenClassification',
        'CanineLayer',
        'CanineModel',
        'CaninePreTrainedModel',
        'load_tf_weights_in_canine',
    ]

if TYPE_CHECKING:
    from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
    from .tokenization_canine import CanineTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_canine import (
            CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
            CanineLayer,
            CanineModel,
            CaninePreTrainedModel,
            load_tf_weights_in_canine,
        )
else:
    import sys

    # Lazy-load the heavy torch-backed symbols on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 84 |
import string
import numpy
def a_(a: int, b: int) -> int:
    """Euclid's algorithm: greatest common divisor of ``a`` and ``b``.

    NOTE(review): the original declared both parameters as `__magic_name__`
    (a SyntaxError) and recursed through the then-undefined name
    `greatest_common_divisor`; both are repaired here.
    """
    return b if a == 0 else greatest_common_divisor(b % a, a)


# HillCipher.check_determinant (and the recursion above) call this helper by
# its original name; keep that name bound since module-wide renaming lost it.
greatest_common_divisor = a_
class a_ :
    """Hill cipher over the 36-character alphabet A-Z0-9 (arithmetic mod 36).

    NOTE(review): the obfuscated original named every class attribute `A__`
    and every method `lowerCAmelCase` (each clobbering the previous), and the
    mod-36 lambda read `x` while its parameter was `a`. All intended names
    are restored from the `self.*` references in the method bodies.
    """

    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters
    key_string = string.ascii_uppercase + string.digits

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)
    # Round float matrix entries back to integer alphabet indices.
    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray):
        """Store the key reduced mod 36 and validate its determinant."""
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        """Map a character to its index in the 36-char alphabet."""
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        """Map a (possibly float) index back to its character."""
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        """Raise ValueError unless det(key) is coprime with 36 (else not invertible)."""
        from math import gcd  # stdlib gcd; module-level helper name was lost in renaming

        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if gcd(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        """Upper-case, drop out-of-alphabet chars, pad to a multiple of break_key."""
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)

    def encrypt(self, text: str) -> str:
        """Encrypt `text` block-by-block with the key matrix (mod 36)."""
        text = self.process_text(text.upper())
        encrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch
        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        """Build the modular inverse of the key matrix (mod 36)."""
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        # adj(K) = det(K) * inv(K); multiply by det^{-1} mod 36 to invert.
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )
        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        """Decrypt `text` block-by-block with the inverse key matrix."""
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch
        return decrypted
# NOTE(review): every top-level name in this file was renamed to `a_`, so at
# this point `a_` is still bound to the HillCipher class above. Bind the name
# the CLI code expects before `def a_` below rebinds `a_` to the entry point.
HillCipher = a_


def a_() -> None:
    """Interactive CLI: read a key matrix, then encrypt or decrypt user text.

    NOTE(review): the original body read the undefined names `__magic_name__`
    (the loop bound and row list) and `HillCipher`; restored from context.
    """
    n = int(input('''Enter the order of the encryption key: '''))
    hill_matrix = []
    print('''Enter each row of the encryption key with space separated integers''')
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)
    hc = HillCipher(numpy.array(hill_matrix))
    print('''Would you like to encrypt or decrypt some text? (1 or 2)''')
    option = input('''\n1. Encrypt\n2. Decrypt\n''')
    if option == "1":
        text_e = input('''What text would you like to encrypt?: ''')
        print('''Your encrypted text is:''')
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input('''What text would you like to decrypt?: ''')
        print('''Your decrypted text is:''')
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): the original called the undefined `main()`; the entry
    # point was renamed `a_` by obfuscation.
    a_()
| 84 | 1 |
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class a_ ( a ):
    # Dummy config registered under a model type unknown to Transformers,
    # used by the registration tests below.
    # NOTE(review): base class `a` is not defined in this file's visible
    # scope (obfuscation artifact) — verify against the original test module.
    A__ : List[Any] = 'new-model'
if is_tf_available():

    class a_ ( a ):
        # Dummy TF model whose config_class points at the new config above.
        # NOTE(review): `NewModelConfig` is referenced here but the config
        # class above was renamed `a_` by obfuscation — verify.
        A__ : str = NewModelConfig
@require_tf
class a_ ( unittest.TestCase ):
    """Integration tests for the TFAuto* factory classes.

    NOTE(review): dataset obfuscation renamed all locals to `snake_case`
    (clobbering), left multi-target annotated assignments (SyntaxErrors),
    and several methods read `UpperCAmelCase__` although no such name is in
    scope. Code is kept byte-for-byte; corrupted spots are flagged inline.
    """

    @slow
    def lowerCAmelCase( self : str ):
        """simple docstring"""
        snake_case : Optional[int] = '''bert-base-cased'''
        snake_case : Dict = AutoConfig.from_pretrained(UpperCAmelCase__ )
        self.assertIsNotNone(UpperCAmelCase__ )
        self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
        snake_case : List[Any] = TFAutoModel.from_pretrained(UpperCAmelCase__ )
        self.assertIsNotNone(UpperCAmelCase__ )
        self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )

    @slow
    def lowerCAmelCase( self : Union[str, Any] ):
        """simple docstring"""
        snake_case : Tuple = '''bert-base-cased'''
        snake_case : List[Any] = AutoConfig.from_pretrained(UpperCAmelCase__ )
        self.assertIsNotNone(UpperCAmelCase__ )
        self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
        snake_case : List[str] = TFAutoModelForPreTraining.from_pretrained(UpperCAmelCase__ )
        self.assertIsNotNone(UpperCAmelCase__ )
        self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )

    @slow
    def lowerCAmelCase( self : str ):
        """simple docstring"""
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            snake_case : List[Any] = AutoConfig.from_pretrained(UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
            snake_case : List[Any] = TFAutoModelForCausalLM.from_pretrained(UpperCAmelCase__ )
            # NOTE(review): multi-target annotated assignment — SyntaxError.
            snake_case , snake_case : List[str] = TFAutoModelForCausalLM.from_pretrained(UpperCAmelCase__ , output_loading_info=UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )

    @slow
    def lowerCAmelCase( self : List[Any] ):
        """simple docstring"""
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            snake_case : List[Any] = AutoConfig.from_pretrained(UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
            snake_case : str = TFAutoModelWithLMHead.from_pretrained(UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )

    @slow
    def lowerCAmelCase( self : List[str] ):
        """simple docstring"""
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            snake_case : Any = AutoConfig.from_pretrained(UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
            snake_case : Tuple = TFAutoModelForMaskedLM.from_pretrained(UpperCAmelCase__ )
            snake_case , snake_case : Any = TFAutoModelForMaskedLM.from_pretrained(UpperCAmelCase__ , output_loading_info=UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )

    @slow
    def lowerCAmelCase( self : List[str] ):
        """simple docstring"""
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            snake_case : int = AutoConfig.from_pretrained(UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
            snake_case : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(UpperCAmelCase__ )
            snake_case , snake_case : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(UpperCAmelCase__ , output_loading_info=UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )

    @slow
    def lowerCAmelCase( self : List[str] ):
        """simple docstring"""
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            snake_case : Any = AutoConfig.from_pretrained(UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
            snake_case : Optional[int] = TFAutoModelForSequenceClassification.from_pretrained(UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )

    @slow
    def lowerCAmelCase( self : int ):
        """simple docstring"""
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            snake_case : Tuple = AutoConfig.from_pretrained(UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
            snake_case : int = TFAutoModelForQuestionAnswering.from_pretrained(UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )

    @slow
    @require_tensorflow_probability
    def lowerCAmelCase( self : Optional[int] ):
        """simple docstring"""
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            snake_case : Tuple = AutoConfig.from_pretrained(UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
            snake_case : Optional[Any] = TFAutoModelForTableQuestionAnswering.from_pretrained(UpperCAmelCase__ )
            snake_case , snake_case : Tuple = TFAutoModelForTableQuestionAnswering.from_pretrained(
                UpperCAmelCase__ , output_loading_info=UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )

    def lowerCAmelCase( self : int ):
        """simple docstring"""
        # NOTE(review): `UpperCAmelCase__` is undefined in this scope — the
        # original test loaded SMALL_MODEL_IDENTIFIER here.
        snake_case : int = TFAutoModelWithLMHead.from_pretrained(UpperCAmelCase__ )
        self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=UpperCAmelCase__ ) , 14_410 )

    def lowerCAmelCase( self : Any ):
        """simple docstring"""
        snake_case : List[str] = TFAutoModelWithLMHead.from_pretrained(UpperCAmelCase__ )
        self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=UpperCAmelCase__ ) , 14_410 )

    def lowerCAmelCase( self : Optional[int] ):
        """simple docstring"""
        # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
        snake_case : int = TFAutoModel.from_pretrained('''sgugger/funnel-random-tiny''' )
        self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
        snake_case : Dict = copy.deepcopy(model.config )
        snake_case : str = ['''FunnelBaseModel''']
        snake_case : Dict = TFAutoModel.from_config(UpperCAmelCase__ )
        self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(UpperCAmelCase__ )
            snake_case : Any = TFAutoModel.from_pretrained(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )

    def lowerCAmelCase( self : List[str] ):
        """simple docstring"""
        try:
            AutoConfig.register('''new-model''' , UpperCAmelCase__ )
            snake_case : Optional[Any] = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]
            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__ ):
                    # Wrong config class will raise an error
                    with self.assertRaises(UpperCAmelCase__ ):
                        auto_class.register(UpperCAmelCase__ , UpperCAmelCase__ )
                    auto_class.register(UpperCAmelCase__ , UpperCAmelCase__ )
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(UpperCAmelCase__ ):
                        auto_class.register(UpperCAmelCase__ , UpperCAmelCase__ )
                    # Now that the config is registered, it can be used as any other config with the auto-API
                    snake_case : Dict = BertModelTester(self ).get_config()
                    snake_case : Union[str, Any] = NewModelConfig(**tiny_config.to_dict() )
                    snake_case : List[Any] = auto_class.from_config(UpperCAmelCase__ )
                    self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(UpperCAmelCase__ )
                        snake_case : int = auto_class.from_pretrained(UpperCAmelCase__ )
                        self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
        finally:
            # Always undo the registrations so other tests see a clean mapping.
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]

    def lowerCAmelCase( self : Optional[Any] ):
        """simple docstring"""
        with self.assertRaisesRegex(
            UpperCAmelCase__ , '''bert-base is not a local folder and is not a valid model identifier''' ):
            snake_case : int = TFAutoModel.from_pretrained('''bert-base''' )

    def lowerCAmelCase( self : Optional[Any] ):
        """simple docstring"""
        with self.assertRaisesRegex(
            UpperCAmelCase__ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
            snake_case : str = TFAutoModel.from_pretrained(UpperCAmelCase__ , revision='''aaaaaa''' )

    def lowerCAmelCase( self : List[Any] ):
        """simple docstring"""
        with self.assertRaisesRegex(
            UpperCAmelCase__ , '''hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin''' , ):
            snake_case : List[str] = TFAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' )

    def lowerCAmelCase( self : Tuple ):
        """simple docstring"""
        with self.assertRaisesRegex(UpperCAmelCase__ , '''Use `from_pt=True` to load this model''' ):
            snake_case : List[Any] = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )

    def lowerCAmelCase( self : str ):
        """simple docstring"""
        # Make sure we have cached the model.
        snake_case : Union[str, Any] = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
        with RequestCounter() as counter:
            snake_case : Dict = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
            self.assertEqual(counter.get_request_count , 0 )
            self.assertEqual(counter.head_request_count , 1 )
            self.assertEqual(counter.other_request_count , 0 )
        # With a sharded checkpoint
        snake_case : Tuple = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
        with RequestCounter() as counter:
            snake_case : Optional[int] = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
            self.assertEqual(counter.get_request_count , 0 )
            self.assertEqual(counter.head_request_count , 1 )
            self.assertEqual(counter.other_request_count , 0 )
| 84 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class a_ ( a ):
A__ : List[Any] = 'Salesforce/blip-image-captioning-base'
A__ : Dict = (
'This is a tool that generates a description of an image. It takes an input named `image` which should be the '
'image to caption, and returns a text that contains the description in English.'
)
A__ : str = 'image_captioner'
A__ : Dict = AutoModelForVisionaSeq
A__ : Optional[Any] = ['image']
A__ : List[str] = ['text']
def __init__( self : List[str] , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : Union[str, Any] ):
"""simple docstring"""
requires_backends(self , ['''vision'''] )
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
def lowerCAmelCase( self : Optional[int] , UpperCAmelCase__ : "Image" ):
"""simple docstring"""
return self.pre_processor(images=UpperCAmelCase__ , return_tensors='''pt''' )
def lowerCAmelCase( self : Any , UpperCAmelCase__ : Union[str, Any] ):
"""simple docstring"""
return self.model.generate(**UpperCAmelCase__ )
def lowerCAmelCase( self : Optional[Any] , UpperCAmelCase__ : List[Any] ):
"""simple docstring"""
return self.pre_processor.batch_decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ )[0].strip()
| 84 | 1 |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def a_ ( __magic_name__ ) -> Optional[int]:
    """A second FileLock on the same path must raise Timeout while the first one is held.

    Args:
        __magic_name__: pytest ``tmpdir`` fixture providing a fresh temporary directory.

    Bug fix: the body referenced undefined names (``tmpdir``, ``locka``,
    ``timeout``, ``_start``); all locals are now bound consistently.
    """
    lock_a = FileLock(str(__magic_name__ / '''foo.lock''' ) )
    lock_b = FileLock(str(__magic_name__ / '''foo.lock''' ) )
    timeout = 0.01
    with lock_a.acquire():
        with pytest.raises(Timeout ):
            _start = time.time()
            # Must block for at least `timeout` seconds before giving up.
            lock_b.acquire(timeout )
        assert time.time() - _start > timeout
def a_ ( __magic_name__ ) -> int:
    """FileLock must truncate an over-long lock filename to at most 255 characters
    while keeping the ``.lock`` suffix, and the truncated lock must still be exclusive.

    Args:
        __magic_name__: pytest ``tmpdir`` fixture providing a fresh temporary directory.

    Bug fix: the body referenced undefined names (``tmpdir``, ``locka``) and
    compared against the parameter instead of ``filename``; locals are now bound
    consistently.
    """
    filename = '''a''' * 1_000 + '''.lock'''
    lock_a = FileLock(str(__magic_name__ / filename ) )
    assert lock_a._lock_file.endswith('''.lock''' )
    assert not lock_a._lock_file.endswith(filename )
    # Common POSIX filename length limit.
    assert len(os.path.basename(lock_a._lock_file ) ) <= 255
    lock_b = FileLock(__magic_name__ / filename )
    with lock_a.acquire():
        with pytest.raises(Timeout ):
            lock_b.acquire(0 )
| 84 |
def a_ ( __magic_name__ ) -> bool:
    """Lucas-Lehmer primality test: True iff the Mersenne number 2**p - 1 is prime.

    Args:
        __magic_name__: the exponent ``p``; must be at least 2.

    Returns:
        True when 2**p - 1 is prime, False otherwise.

    Raises:
        ValueError: if ``p`` is smaller than 2.

    Bug fix: the body read undefined names (``p``, ``s``, ``m``) and the
    ``__main__`` guard called the undefined name ``lucas_lehmer_test``.
    """
    p = __magic_name__
    if p < 2:
        raise ValueError('''p should not be less than 2!''' )
    elif p == 2:
        # 2**2 - 1 == 3 is prime; the s-sequence below requires p > 2.
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2 ):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    print(a_(7))
    print(a_(11))
| 84 | 1 |
import math
def a_ ( __magic_name__ ) -> str:
    """Convert a non-negative integer to its octal representation, e.g. 8 -> "0o10".

    Args:
        __magic_name__: the non-negative decimal integer to convert.

    Returns:
        The octal string prefixed with ``0o`` (``"0o0"`` for input 0).

    Bug fix: the original read undefined names (``num``, ``octal``, ``counter``)
    and returned the *input* instead of the computed octal value; it also built
    the result with float ``math.pow``, which loses precision for large numbers.
    Pure integer arithmetic is used instead.
    """
    num = __magic_name__
    octal = 0
    place = 1  # power of ten for the octal digit currently being placed
    while num > 0:
        num, remainder = divmod(num , 8 )
        octal += remainder * place
        place *= 10
    return F"0o{octal}"
def a_ ( ) -> None:
    """Demo driver that prints the octal conversion of a few sample values."""
    # NOTE(review): `decimal_to_octal` is not defined anywhere in this module -- the
    # conversion function above was renamed to `a_`, and this very definition now
    # shadows that name. Calling this function raises NameError; the calls need to
    # be re-pointed at the conversion function (and this driver renamed).
    print('''\n2 in octal is:''' )
    print(decimal_to_octal(2 ) ) # = 2
    print('''\n8 in octal is:''' )
    print(decimal_to_octal(8 ) ) # = 10
    print('''\n65 in octal is:''' )
    print(decimal_to_octal(65 ) ) # = 101
    print('''\n216 in octal is:''' )
    print(decimal_to_octal(216 ) ) # = 330
    print('''\n512 in octal is:''' )
    print(decimal_to_octal(512 ) ) # = 1000
    print('''\n''' )


if __name__ == "__main__":
    # NOTE(review): `main` is also undefined under the current names (the driver above
    # is named `a_`); running this module as a script would raise NameError.
    main()
| 84 |
from sklearn.metrics import fa_score
import datasets
_a : List[str] = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'
_a : Dict = '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. 
This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, 
references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n'
_a : List[Any] = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
def lowerCAmelCase( self : Any ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
def lowerCAmelCase( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Optional[Any]=1 , UpperCAmelCase__ : List[str]="binary" , UpperCAmelCase__ : str=None ):
"""simple docstring"""
snake_case : List[Any] = fa_score(
UpperCAmelCase__ , UpperCAmelCase__ , labels=UpperCAmelCase__ , pos_label=UpperCAmelCase__ , average=UpperCAmelCase__ , sample_weight=UpperCAmelCase__ )
return {"f1": float(UpperCAmelCase__ ) if score.size == 1 else score}
| 84 | 1 |
import requests
_a : Tuple = 'https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey='
def a_ ( __magic_name__ ) -> None:
    """Fetch the top BBC News articles and print a numbered list of their titles.

    Args:
        __magic_name__: the NewsAPI key appended to the query URL.

    Bug fix: the body referenced undefined names (``bbc_news_api_key``,
    ``bbc_news_page``) instead of the parameter/local, and the ``__main__``
    guard called the undefined name ``fetch_bbc_news`` with a keyword that does
    not exist on this signature.
    """
    bbc_news_page = requests.get(_NEWS_API + __magic_name__ ).json()
    # each article in the list is a dict with at least a 'title' key
    for i, article in enumerate(bbc_news_page['''articles'''] , 1 ):
        print(F"{i}.) {article['title']}" )


if __name__ == "__main__":
    # Pass the key positionally: the parameter is (unhelpfully) named __magic_name__.
    a_('<Your BBC News API key goes here>')
| 84 |
def a_ ( __magic_name__ ) -> int:
    """Return the largest number obtainable by deleting exactly one digit of the input.

    The sign is discarded (``abs``) before the digits are considered.

    Args:
        __magic_name__: the integer whose digits are examined; must be an ``int``.

    Returns:
        The maximum over all single-digit deletions, e.g. 123 -> 23.

    Raises:
        TypeError: if the input is not an integer.
        ValueError: for single-digit inputs (deleting the only digit leaves nothing).

    Bug fix: the original tested ``isinstance(x, x)`` (always a TypeError for
    non-types), popped with an undefined index, and joined the wrong variable.
    """
    if not isinstance(__magic_name__ , int ):
        raise TypeError('''only integers accepted as input''' )
    digits = str(abs(__magic_name__ ) )
    # Candidate numbers: the input with the digit at each position removed.
    return max(
        int(digits[:index] + digits[index + 1 :] ) for index in range(len(digits ) ) )


if __name__ == "__main__":
    __import__('doctest').testmod()
| 84 | 1 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_a : Optional[Any] = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
_a : str = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
_a : List[Any] = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. 
Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
    """MAUVE metric: thin wrapper around ``mauve.compute_mauve`` (mauve-text package)."""

    def lowerCAmelCase( self : Any ):
        """Describe the metric: citation, string input schema, codebase and reference URLs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''' , id='''sequence''' ),
                    '''references''': datasets.Value('''string''' , id='''sequence''' ),
                } ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
                '''https://arxiv.org/abs/2102.01454''',
                '''https://github.com/krishnap25/mauve''',
            ] , )

    def lowerCAmelCase( self : int , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : List[str]="auto" , UpperCAmelCase__ : Tuple=-1 , UpperCAmelCase__ : Optional[int]=0.9 , UpperCAmelCase__ : List[Any]=5 , UpperCAmelCase__ : List[Any]=500 , UpperCAmelCase__ : Union[str, Any]="gpt2-large" , UpperCAmelCase__ : Optional[Any]=-1 , UpperCAmelCase__ : int=1_024 , UpperCAmelCase__ : List[Any]=25 , UpperCAmelCase__ : Union[str, Any]=5 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : List[Any]=25 , ):
        """Run ``compute_mauve`` on prediction/reference texts (or precomputed features/tokens)
        and return its result object (mauve score, frontier integral, divergence curve, ...)."""
        # NOTE(review): the result is assigned to a local named `snake_case` but the
        # function returns `out`, which is undefined here -- this would raise NameError;
        # the assignment target presumably should be `out`. Verify and restore.
        snake_case : List[str] = compute_mauve(
            p_text=UpperCAmelCase__ , q_text=UpperCAmelCase__ , p_features=UpperCAmelCase__ , q_features=UpperCAmelCase__ , p_tokens=UpperCAmelCase__ , q_tokens=UpperCAmelCase__ , num_buckets=UpperCAmelCase__ , pca_max_data=UpperCAmelCase__ , kmeans_explained_var=UpperCAmelCase__ , kmeans_num_redo=UpperCAmelCase__ , kmeans_max_iter=UpperCAmelCase__ , featurize_model_name=UpperCAmelCase__ , device_id=UpperCAmelCase__ , max_text_length=UpperCAmelCase__ , divergence_curve_discretization_size=UpperCAmelCase__ , mauve_scaling_factor=UpperCAmelCase__ , verbose=UpperCAmelCase__ , seed=UpperCAmelCase__ , )
        return out
| 84 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class a_ :
    """Tester that builds tiny UMT5 configs/inputs and runs shape and caching checks.

    NOTE(review): throughout this class, results are assigned to locals literally
    named ``snake_case`` while later statements read differently-named locals
    (``input_ids``, ``decoder_output``, ``outputs``, ``config`` ...). As written,
    most method bodies raise NameError at runtime -- the original assignment
    targets need restoring before these helpers are usable.
    """

    def __init__( self : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any]=99 , UpperCAmelCase__ : Dict=13 , UpperCAmelCase__ : int=7 , UpperCAmelCase__ : Any=9 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Tuple=False , UpperCAmelCase__ : Tuple=32 , UpperCAmelCase__ : Dict=5 , UpperCAmelCase__ : Optional[int]=4 , UpperCAmelCase__ : List[Any]=37 , UpperCAmelCase__ : Union[str, Any]=8 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : str=0.002 , UpperCAmelCase__ : str=1 , UpperCAmelCase__ : Any=0 , UpperCAmelCase__ : Union[str, Any]=0 , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Optional[Any]=None , ):
        """Record the hyper-parameters used to build the tiny test configs and inputs."""
        snake_case : Union[str, Any] = parent
        snake_case : Union[str, Any] = batch_size
        snake_case : Any = encoder_seq_length
        snake_case : str = decoder_seq_length
        # For common tests
        snake_case : Optional[int] = self.decoder_seq_length
        snake_case : Optional[Any] = is_training
        snake_case : List[Any] = use_attention_mask
        snake_case : Union[str, Any] = use_labels
        snake_case : Any = vocab_size
        snake_case : Optional[int] = hidden_size
        snake_case : List[str] = num_hidden_layers
        snake_case : Union[str, Any] = num_attention_heads
        snake_case : Any = d_ff
        snake_case : Any = relative_attention_num_buckets
        snake_case : Optional[Any] = dropout_rate
        snake_case : int = initializer_factor
        snake_case : Optional[Any] = eos_token_id
        snake_case : Dict = pad_token_id
        snake_case : Optional[Any] = decoder_start_token_id
        snake_case : Union[str, Any] = None
        snake_case : List[str] = decoder_layers

    def lowerCAmelCase( self : Union[str, Any] ):
        """Load the reference UMT5-base config from the Hub."""
        return TaConfig.from_pretrained('''google/umt5-base''' )

    def lowerCAmelCase( self : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : List[Any]=None , ):
        """Fill in default attention/head masks for any that the caller did not supply
        and package everything into the kwargs dict the model forward expects."""
        if attention_mask is None:
            # Mask out padding positions in the encoder input.
            snake_case : Union[str, Any] = input_ids.ne(config.pad_token_id )
        if decoder_attention_mask is None:
            snake_case : Any = decoder_input_ids.ne(config.pad_token_id )
        if head_mask is None:
            # All-ones masks keep every attention head active by default.
            snake_case : List[Any] = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=UpperCAmelCase__ )
        if decoder_head_mask is None:
            snake_case : Tuple = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=UpperCAmelCase__ )
        if cross_attn_head_mask is None:
            snake_case : Union[str, Any] = torch.ones(
                config.num_decoder_layers , config.num_attention_heads , device=UpperCAmelCase__ )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }

    def lowerCAmelCase( self : int ):
        """Build random encoder/decoder ids (clamped away from the pad id), the tiny
        config, and the full forward-kwargs dict."""
        snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
        snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        snake_case : List[str] = input_ids.clamp(self.pad_token_id + 1 )
        snake_case : List[str] = decoder_input_ids.clamp(self.pad_token_id + 1 )
        snake_case : str = self.get_config()
        snake_case : Tuple = config.num_attention_heads
        snake_case : List[Any] = self.prepare_inputs_dict(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
        return config, input_dict

    def lowerCAmelCase( self : Dict ):
        """Alias returning (config, inputs_dict) in the shape the common test mixins expect."""
        snake_case , snake_case : List[str] = self.prepare_config_and_inputs()
        return config, inputs_dict

    def lowerCAmelCase( self : Dict ):
        """Tiny TaConfig with a fixed 166-token vocabulary."""
        return TaConfig(
            vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )

    def lowerCAmelCase( self : Tuple ):
        """Tiny TaConfig built entirely from this tester's hyper-parameters."""
        return TaConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )

    def lowerCAmelCase( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Tuple , ):
        """Forward the full encoder-decoder model and check output shapes plus the
        per-layer structure of the returned past_key_values cache."""
        snake_case : str = UMTaModel(config=UpperCAmelCase__ )
        model.to(UpperCAmelCase__ )
        model.eval()
        snake_case : str = model(
            input_ids=UpperCAmelCase__ , decoder_input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , )
        snake_case : int = model(input_ids=UpperCAmelCase__ , decoder_input_ids=UpperCAmelCase__ )
        snake_case : int = result.last_hidden_state
        snake_case : Dict = result.past_key_values
        snake_case : Dict = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
        self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(UpperCAmelCase__ ) , config.num_layers )
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0] ) , 4 )

    def lowerCAmelCase( self : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str] , ):
        """Check that decoding with a past_key_values cache matches a full re-computation
        on the extended sequence (standalone decoder)."""
        snake_case : int = UMTaModel(config=UpperCAmelCase__ ).get_decoder().to(UpperCAmelCase__ ).eval()
        # first forward pass
        snake_case : List[Any] = model(UpperCAmelCase__ , use_cache=UpperCAmelCase__ )
        snake_case : List[Any] = model(UpperCAmelCase__ )
        snake_case : Any = model(UpperCAmelCase__ , use_cache=UpperCAmelCase__ )
        self.parent.assertTrue(len(UpperCAmelCase__ ) == len(UpperCAmelCase__ ) )
        self.parent.assertTrue(len(UpperCAmelCase__ ) == len(UpperCAmelCase__ ) + 1 )
        snake_case , snake_case : List[str] = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        snake_case : Any = ids_tensor((self.batch_size, 1) , config.vocab_size )
        # append to next input_ids and
        snake_case : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
        snake_case : Any = model(UpperCAmelCase__ )['''last_hidden_state''']
        snake_case : Optional[Any] = model(UpperCAmelCase__ , past_key_values=UpperCAmelCase__ )['''last_hidden_state''']
        # select random slice
        snake_case : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        snake_case : Union[str, Any] = output_from_no_past[:, -1, random_slice_idx].detach()
        snake_case : Tuple = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-3 ) )

    def lowerCAmelCase( self : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , ):
        """Forward in fp16 must not produce NaNs."""
        snake_case : int = UMTaModel(config=UpperCAmelCase__ ).to(UpperCAmelCase__ ).half().eval()
        snake_case : str = model(**UpperCAmelCase__ )['''last_hidden_state''']
        self.parent.assertFalse(torch.isnan(UpperCAmelCase__ ).any().item() )
@require_torch
class a_ ( a , a , a , unittest.TestCase ):
    """Common + generation + pipeline test-suite mixin instantiation for UMT5.

    NOTE(review): `setUp` calls `UMTaModelTester`, which is not defined under that
    name in this module (the tester class above was renamed to `a_`); the attribute
    assignment target is also mangled. Verify the intended names before running.
    """

    A__ : str = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    A__ : str = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    A__ : Any = (
        {
            'conversational': UMTaForConditionalGeneration,
            'feature-extraction': UMTaModel,
            'summarization': UMTaForConditionalGeneration,
            'text2text-generation': UMTaForConditionalGeneration,
            'translation': UMTaForConditionalGeneration,
            'question-answering': UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    A__ : Dict = True
    A__ : List[str] = False
    A__ : Optional[int] = False
    A__ : Optional[int] = True
    A__ : List[str] = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    A__ : int = [0.8, 0.9]

    def lowerCAmelCase( self : Optional[Any] ):
        """Create the shared model-tester fixture."""
        snake_case : Union[str, Any] = UMTaModelTester(self )

    @unittest.skip('''Test has a segmentation fault on torch 1.8.0''' )
    def lowerCAmelCase( self : Optional[int] ):
        """Export a UMT5 model to ONNX (skipped: segfaults on torch 1.8.0)."""
        snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
        snake_case : Optional[Any] = UMTaModel(config_and_inputs[0] ).to(UpperCAmelCase__ )
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                UpperCAmelCase__ , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"{tmpdirname}/t5_test.onnx" , export_params=UpperCAmelCase__ , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , )

    @unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
    def lowerCAmelCase( self : List[Any] ):
        """Run the fp16 forward check (only on devices that support half precision)."""
        snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fpaa_forward(*UpperCAmelCase__ )

    def lowerCAmelCase( self : Tuple ):
        """A zeroed head mask must zero the corresponding attention weights at generation."""
        # NOTE(review): `head_masking` and `attention_names` are read below but only
        # `snake_case` locals are assigned above -- undefined as written; verify.
        snake_case : Optional[int] = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions''']
        snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
        snake_case : int = config_and_inputs[0]
        snake_case : Union[str, Any] = UMTaForConditionalGeneration(UpperCAmelCase__ ).eval()
        model.to(UpperCAmelCase__ )
        snake_case : str = {
            '''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=UpperCAmelCase__ ),
            '''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCAmelCase__ ),
            '''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCAmelCase__ ),
        }
        for attn_name, (name, mask) in zip(UpperCAmelCase__ , head_masking.items() ):
            snake_case : int = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                snake_case : List[str] = torch.ones(
                    config.num_decoder_layers , config.num_heads , device=UpperCAmelCase__ )
            snake_case : Union[str, Any] = model.generate(
                config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=UpperCAmelCase__ , return_dict_in_generate=UpperCAmelCase__ , **UpperCAmelCase__ , )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            snake_case : List[str] = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )

    @unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' )
    def lowerCAmelCase( self : Any ):
        """Disk-offload style check, skipped for the tiny model."""
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class a_ ( unittest.TestCase ):
    """Slow integration check for UMT5-small: tokenization ids and generations
    against golden values (currently skipped pending a tokenizer-stripping fix).

    NOTE(review): the method body reads `UpperCAmelCase__` (e.g. as a device for
    `.to(...)` and in the assert helpers) although the method takes no parameter of
    that name -- undefined as written; presumably `torch_device` / the tensors
    being compared. Verify before un-skipping.
    """

    @slow
    @unittest.skip(
        '''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' )
    def lowerCAmelCase( self : int ):
        """Tokenize sample prompts, compare ids to a golden tensor, then compare
        model generations to golden decoded strings."""
        snake_case : Optional[Any] = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=UpperCAmelCase__ ).to(UpperCAmelCase__ )
        snake_case : int = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=UpperCAmelCase__ , legacy=UpperCAmelCase__ )
        snake_case : List[str] = [
            '''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
            '''No se como puedo <extra_id_0>.''',
            '''This is the reason why we <extra_id_0> them.''',
            '''The <extra_id_0> walks in <extra_id_1>, seats''',
            '''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
        ]
        snake_case : Dict = tokenizer(UpperCAmelCase__ , return_tensors='''pt''' , padding=UpperCAmelCase__ ).input_ids
        # fmt: off
        snake_case : Optional[Any] = torch.tensor(
            [
                [ 38_530, 210_703, 256_299, 1_410, 256_298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 826, 321, 671, 25_922, 256_299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 1_460, 339, 312, 19_014, 10_620, 758, 256_299, 2_355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
                [ 517, 256_299, 14_869, 281, 301, 256_298, 275, 119_983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 320, 256_299, 14_869, 281, 2_234, 289, 2_275, 333,61_391, 289, 256_298, 543, 256_297, 168_714, 329, 256_296,274, 1],
            ] )
        # fmt: on
        torch.testing.assert_allclose(UpperCAmelCase__ , UpperCAmelCase__ )
        snake_case : List[Any] = model.generate(input_ids.to(UpperCAmelCase__ ) )
        snake_case : int = [
            '''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
            '''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
            '''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
            '''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
            '''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
        ]
        snake_case : Tuple = tokenizer.batch_decode(UpperCAmelCase__ )
        self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
| 84 | 1 |
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
_a : Any = logging.get_logger(__name__)
class a_ :
    # NOTE(review): both class attributes are named ``A__`` while the methods below
    # read ``self.name`` / ``cls.pip_package`` / ``self.is_available`` — this looks
    # like a renaming artifact from the original backend API; verify before relying
    # on these attributes.  All methods also share one name, so later definitions
    # shadow earlier ones on the class.
    A__ : str
    A__ : str = None
    @staticmethod
    def lowerCAmelCase( ):
        """Report whether this backend's package is importable (subclass hook)."""
        raise NotImplementedError
    def lowerCAmelCase( self : str , trainer : Optional[Any] , n_trials : int , direction : str , **kwargs : List[Any] ):
        """Run a hyperparameter sweep (subclass hook).

        Fix: the original definition repeated one parameter name four times,
        which is a SyntaxError in Python; parameters now have distinct names.
        """
        raise NotImplementedError
    def lowerCAmelCase( self : Any , trial : Any ):
        """Return the default search space for ``trial`` (subclass hook)."""
        raise NotImplementedError
    def lowerCAmelCase( self : Any ):
        """Raise ``RuntimeError`` with install instructions when the backend is missing."""
        if not self.is_available():
            raise RuntimeError(
                F"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}." )
    @classmethod
    def lowerCAmelCase( cls : Optional[Any] ):
        """Return the pip command that installs this backend's package."""
        return F"`pip install {cls.pip_package or cls.name}`"
class a_ ( a ):
    # Optuna hyperparameter-search backend (thin delegation layer).
    A__ : Optional[int] = 'optuna'
    @staticmethod
    def lowerCAmelCase( ):
        """Return True when the ``optuna`` package can be imported."""
        return is_optuna_available()
    def lowerCAmelCase( self : Any , trainer : List[str] , n_trials : int , direction : str , **kwargs : str ):
        """Delegate the sweep to ``run_hp_search_optuna``.

        Fix: the original signature repeated one parameter name (a SyntaxError);
        parameters now have distinct names.
        """
        return run_hp_search_optuna(trainer , n_trials , direction , **kwargs )
    def lowerCAmelCase( self : Dict , trial : List[Any] ):
        """Return the default Optuna search space for ``trial``."""
        return default_hp_space_optuna(trial )
class a_ ( a ):
    # Ray Tune hyperparameter-search backend (thin delegation layer).
    A__ : Union[str, Any] = 'ray'
    A__ : Any = '\'ray[tune]\''
    @staticmethod
    def lowerCAmelCase( ):
        """Return True when Ray Tune can be imported."""
        return is_ray_available()
    def lowerCAmelCase( self : Tuple , trainer : List[str] , n_trials : int , direction : str , **kwargs : Tuple ):
        """Delegate the sweep to ``run_hp_search_ray``.

        Fix: the original signature repeated one parameter name (a SyntaxError);
        parameters now have distinct names.
        """
        return run_hp_search_ray(trainer , n_trials , direction , **kwargs )
    def lowerCAmelCase( self : Tuple , trial : Optional[int] ):
        """Return the default Ray Tune search space for ``trial``."""
        return default_hp_space_ray(trial )
class a_ ( a ):
    # SigOpt hyperparameter-search backend (thin delegation layer).
    A__ : Any = 'sigopt'
    @staticmethod
    def lowerCAmelCase( ):
        """Return True when the ``sigopt`` package can be imported."""
        return is_sigopt_available()
    def lowerCAmelCase( self : str , trainer : List[str] , n_trials : int , direction : str , **kwargs : Optional[Any] ):
        """Delegate the sweep to ``run_hp_search_sigopt``.

        Fix: the original signature repeated one parameter name (a SyntaxError);
        parameters now have distinct names.
        """
        return run_hp_search_sigopt(trainer , n_trials , direction , **kwargs )
    def lowerCAmelCase( self : Dict , trial : Dict ):
        """Return the default SigOpt search space for ``trial``."""
        return default_hp_space_sigopt(trial )
class a_ ( a ):
    # Weights & Biases hyperparameter-search backend (thin delegation layer).
    A__ : List[Any] = 'wandb'
    @staticmethod
    def lowerCAmelCase( ):
        """Return True when the ``wandb`` package can be imported."""
        return is_wandb_available()
    def lowerCAmelCase( self : Optional[int] , trainer : str , n_trials : int , direction : str , **kwargs : Any ):
        """Delegate the sweep to ``run_hp_search_wandb``.

        Fix: the original signature repeated one parameter name (a SyntaxError);
        parameters now have distinct names.
        """
        return run_hp_search_wandb(trainer , n_trials , direction , **kwargs )
    def lowerCAmelCase( self : Tuple , trial : List[Any] ):
        """Return the default W&B search space for ``trial``."""
        return default_hp_space_wandb(trial )
# Registry mapping each HPSearchBackend enum member to its backend class.
# NOTE(review): OptunaBackend/RayTuneBackend/SigOptBackend/WandbBackend are not
# defined under these names in this file (the classes above are all named ``a_``)
# — likely a renaming artifact; verify against the original module.
_a : Optional[int] = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def a_ ( ) -> str:
    """Return the name of the default hyperparameter-search backend.

    The first installed backend (in registry order) wins; an info message is
    logged when more than one is installed.

    Raises:
        RuntimeError: when no search backend is installed, with per-backend
            pip install instructions.
    """
    # Fix: the original body referenced an undefined ``__magic_name__``; the
    # intended variable is the list of installed backends computed here.
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends ) > 0:
        name = available_backends[0].name
        if len(available_backends ) > 1:
            logger.info(
                F"{len(available_backends )} hyperparameter search backends available. Using {name} as the default." )
        return name
    raise RuntimeError(
        '''No hyperparameter search backend available.\n'''
        + '''\n'''.join(
            F" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
| 84 |
import torch
from diffusers import DiffusionPipeline
class a_ ( a ):
    """Minimal custom diffusion pipeline: one UNet + one scheduler, single denoising step."""
    def __init__( self : Optional[Any] , unet : Tuple , scheduler : List[Any] ):
        """Register the two sub-modules on the pipeline.

        Fix: the original signature repeated one parameter name (a SyntaxError in
        Python) and the body referenced names that were never bound.
        """
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
    def __call__( self : Optional[int] ):
        """Run one denoising step on random noise and return an all-ones tensor."""
        sample = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
        timestep = 1
        model_output = self.unet(sample , timestep ).sample
        scheduler_output = self.scheduler.step(model_output , timestep , sample ).prev_sample
        # scheduler_output - scheduler_output cancels to zero, so the result is a
        # ones tensor with the scheduler output's shape/dtype.
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output )
        return result
| 84 | 1 |
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class a_ ( a , unittest.TestCase ):
    # Fast (CPU) smoke tests for DDIMPipeline via the common pipeline tester mixin.
    A__ : Dict = DDIMPipeline
    A__ : Any = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    A__ : Tuple = PipelineTesterMixin.required_optional_params - {
        'num_images_per_prompt',
        'latents',
        'callback',
        'callback_steps',
    }
    A__ : str = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    A__ : int = False
    def lowerCAmelCase( self : Optional[Any] ):
        """Build a tiny seeded UNet2D + DDIM scheduler component dict for fast tests."""
        torch.manual_seed(0 )
        snake_case : List[str] = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
        snake_case : Optional[Any] = DDIMScheduler()
        # NOTE(review): ``unet`` / ``scheduler`` / ``components`` are never bound
        # above (all locals are named ``snake_case``) — renaming artifact; verify.
        snake_case : int = {'''unet''': unet, '''scheduler''': scheduler}
        return components
    def lowerCAmelCase( self : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple=0 ):
        """Return deterministic pipeline-call kwargs (generator seeded per device)."""
        if str(UpperCAmelCase__ ).startswith('''mps''' ):
            snake_case : int = torch.manual_seed(UpperCAmelCase__ )
        else:
            snake_case : Dict = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
        snake_case : List[Any] = {
            '''batch_size''': 1,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs
    def lowerCAmelCase( self : int ):
        """Run the pipeline on CPU for 2 steps and compare a 3x3 output slice."""
        snake_case : Dict = '''cpu'''
        snake_case : Any = self.get_dummy_components()
        snake_case : int = self.pipeline_class(**UpperCAmelCase__ )
        pipe.to(UpperCAmelCase__ )
        pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
        snake_case : Any = self.get_dummy_inputs(UpperCAmelCase__ )
        snake_case : Optional[int] = pipe(**UpperCAmelCase__ ).images
        snake_case : Dict = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 32, 32, 3) )
        snake_case : Dict = np.array(
            [1.0_0_0e0_0, 5.7_1_7e-0_1, 4.7_1_7e-0_1, 1.0_0_0e0_0, 0.0_0_0e0_0, 1.0_0_0e0_0, 3.0_0_0e-0_4, 0.0_0_0e0_0, 9.0_0_0e-0_4] )
        snake_case : Tuple = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(UpperCAmelCase__ , 1e-3 )
    def lowerCAmelCase( self : Union[str, Any] ):
        """Relax the mixin's dict/tuple-output equivalence tolerance for this pipeline."""
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
    def lowerCAmelCase( self : Union[str, Any] ):
        """Relax the mixin's save/load-local tolerance for this pipeline."""
        super().test_save_load_local(expected_max_difference=3e-3 )
    def lowerCAmelCase( self : Tuple ):
        """Relax the mixin's optional-components save/load tolerance."""
        super().test_save_load_optional_components(expected_max_difference=3e-3 )
    def lowerCAmelCase( self : List[Any] ):
        """Relax the mixin's single-vs-batch inference tolerance."""
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
    """Slow GPU integration tests running full DDIM/DDPM checkpoints end to end.

    Fix: the original method bodies referenced an undefined ``UpperCAmelCase__``
    and never bound their locals (all were named ``snake_case``); the locals are
    reconstructed so each value actually flows into the next call.
    """
    def lowerCAmelCase( self : Any ):
        """Sample from the CIFAR-10 DDPM checkpoint with DDIM and check a 3x3 slice."""
        model_id = '''google/ddpm-cifar10-32'''
        unet = UNetaDModel.from_pretrained(model_id )
        scheduler = DDIMScheduler()
        ddim = DDIMPipeline(unet=unet , scheduler=scheduler )
        ddim.to(torch_device )
        ddim.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ddim(generator=generator , eta=0.0 , output_type='''numpy''' ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def lowerCAmelCase( self : Dict ):
        """Sample from the EMA bedroom-256 checkpoint and check a 3x3 slice."""
        model_id = '''google/ddpm-ema-bedroom-256'''
        unet = UNetaDModel.from_pretrained(model_id )
        scheduler = DDIMScheduler.from_pretrained(model_id )
        ddpm = DDIMPipeline(unet=unet , scheduler=scheduler )
        ddpm.to(torch_device )
        ddpm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ddpm(generator=generator , output_type='''numpy''' ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 84 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class a_ ( a ):
    """Processor bundling a CLIP image processor and a CLIP tokenizer behind one
    ``__call__``.

    Fixes applied: the original ``__init__`` and ``__call__`` repeated parameter
    names (a SyntaxError in Python); ``pixel_values`` was assigned to a throwaway
    local instead of being attached to the returned encoding; and the deprecation
    warnings passed an undefined name where the ``FutureWarning`` category belongs.
    """
    A__ : List[str] = ['image_processor', 'tokenizer']
    A__ : Any = 'CLIPImageProcessor'
    A__ : Optional[int] = ('CLIPTokenizer', 'CLIPTokenizerFast')
    def __init__( self : Union[str, Any] , image_processor : str=None , tokenizer : Union[str, Any]=None , **kwargs : Optional[int] ):
        """Validate the two sub-processors, honoring the deprecated
        ``feature_extractor`` kwarg as a fallback for ``image_processor``."""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )
    def __call__( self : Any , text : Dict=None , images : Union[str, Any]=None , return_tensors : int=None , **kwargs : Union[str, Any] ):
        """Tokenize ``text`` and/or preprocess ``images``; when both are given the
        image ``pixel_values`` are merged into the text encoding."""
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            # Attach pixel values to the returned encoding (the original assigned
            # them to an unused local, silently dropping them).
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def lowerCAmelCase( self : List[str] , *args : Dict , **kwargs : int ):
        """Forward all arguments to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def lowerCAmelCase( self : Optional[int] , *args : Optional[Any] , **kwargs : str ):
        """Forward all arguments to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def lowerCAmelCase( self : Tuple ):
        """Union of tokenizer and image-processor input names, de-duplicated in order."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def lowerCAmelCase( self : Tuple ):
        """Deprecated alias for ``image_processor_class``."""
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
        return self.image_processor_class
    @property
    def lowerCAmelCase( self : Any ):
        """Deprecated alias for ``image_processor``."""
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , FutureWarning , )
        return self.image_processor
| 84 | 1 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
_a : Tuple = logging.get_logger(__name__)
# General docstring
_a : List[Any] = 'RegNetConfig'
# Base docstring
_a : int = 'facebook/regnet-y-040'
_a : int = [1, 1_088, 7, 7]
# Image classification docstring
_a : Tuple = 'facebook/regnet-y-040'
_a : int = 'tabby, tabby cat'
_a : Union[str, Any] = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class a_ ( tf.keras.layers.Layer ):
    # Conv -> BatchNorm -> activation block with explicit zero padding.
    # NOTE(review): the body references ``kernel_size``/``activation`` and assigns
    # every sub-layer to a local named ``snake_case`` — renaming artifacts; verify
    # against the original layer before executing.
    def __init__( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : Optional[str] = "relu" , **UpperCAmelCase__ : Union[str, Any] , ):
        """Build padding, (grouped) convolution, batch-norm and activation sub-layers."""
        super().__init__(**UpperCAmelCase__ )
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        snake_case : Any = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
        snake_case : Any = tf.keras.layers.ConvaD(
            filters=UpperCAmelCase__ , kernel_size=UpperCAmelCase__ , strides=UpperCAmelCase__ , padding='''VALID''' , groups=UpperCAmelCase__ , use_bias=UpperCAmelCase__ , name='''convolution''' , )
        snake_case : int = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )
        snake_case : Union[str, Any] = ACTaFN[activation] if activation is not None else tf.identity
    def lowerCAmelCase( self : Dict , UpperCAmelCase__ : Union[str, Any] ):
        """Apply pad -> conv -> batch-norm -> activation to the input tensor."""
        snake_case : Any = self.convolution(self.padding(UpperCAmelCase__ ) )
        snake_case : str = self.normalization(UpperCAmelCase__ )
        snake_case : Tuple = self.activation(UpperCAmelCase__ )
        return hidden_state
class a_ ( tf.keras.layers.Layer ):
    # Stem embedding layer: one stride-2 conv block, with an NCHW -> NHWC transpose
    # (Keras Conv2D on CPU does not support NCHW input).
    def __init__( self : Any , UpperCAmelCase__ : RegNetConfig , **UpperCAmelCase__ : int ):
        """Record the expected channel count and build the stem conv block."""
        super().__init__(**UpperCAmelCase__ )
        snake_case : Union[str, Any] = config.num_channels
        snake_case : List[str] = TFRegNetConvLayer(
            out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , )
    def lowerCAmelCase( self : Any , UpperCAmelCase__ : Tuple ):
        """Validate the channel dimension, convert to NHWC and run the embedder."""
        snake_case : List[str] = shape_list(UpperCAmelCase__ )[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        snake_case : Tuple = tf.transpose(UpperCAmelCase__ , perm=(0, 2, 3, 1) )
        snake_case : str = self.embedder(UpperCAmelCase__ )
        return hidden_state
class a_ ( tf.keras.layers.Layer ):
    # Residual shortcut: 1x1 conv (for downsampling/channel change) + batch-norm.
    def __init__( self : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 2 , **UpperCAmelCase__ : Dict ):
        """Build the 1x1 projection convolution and its batch normalization."""
        super().__init__(**UpperCAmelCase__ )
        snake_case : Any = tf.keras.layers.ConvaD(
            filters=UpperCAmelCase__ , kernel_size=1 , strides=UpperCAmelCase__ , use_bias=UpperCAmelCase__ , name='''convolution''' )
        snake_case : Union[str, Any] = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )
    def lowerCAmelCase( self : Optional[int] , UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : bool = False ):
        """Project the input with the 1x1 conv and normalize it."""
        return self.normalization(self.convolution(UpperCAmelCase__ ) , training=UpperCAmelCase__ )
class a_ ( tf.keras.layers.Layer ):
    # Squeeze-and-excitation block: global-average pool, two 1x1 convs
    # (relu then sigmoid), then channel-wise rescaling of the input.
    def __init__( self : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , **UpperCAmelCase__ : Optional[Any] ):
        """Build the pooling layer and the two-conv attention gate."""
        super().__init__(**UpperCAmelCase__ )
        snake_case : Dict = tf.keras.layers.GlobalAveragePoolingaD(keepdims=UpperCAmelCase__ , name='''pooler''' )
        snake_case : List[str] = [
            tf.keras.layers.ConvaD(filters=UpperCAmelCase__ , kernel_size=1 , activation='''relu''' , name='''attention.0''' ),
            tf.keras.layers.ConvaD(filters=UpperCAmelCase__ , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ),
        ]
    def lowerCAmelCase( self : Optional[int] , UpperCAmelCase__ : Union[str, Any] ):
        """Pool to per-channel statistics, compute the gate, and rescale the input."""
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        snake_case : Dict = self.pooler(UpperCAmelCase__ )
        for layer_module in self.attention:
            snake_case : int = layer_module(UpperCAmelCase__ )
        snake_case : Optional[int] = hidden_state * pooled
        return hidden_state
class a_ ( tf.keras.layers.Layer ):
    # RegNet "X" residual layer: 1x1 conv, grouped 3x3 conv, 1x1 conv, plus a
    # shortcut branch, with the residual added before the final activation.
    def __init__( self : List[str] , UpperCAmelCase__ : RegNetConfig , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 1 , **UpperCAmelCase__ : Optional[int] ):
        """Build the shortcut branch and the three conv sub-layers."""
        super().__init__(**UpperCAmelCase__ )
        snake_case : Tuple = in_channels != out_channels or stride != 1
        snake_case : List[str] = max(1 , out_channels // config.groups_width )
        snake_case : Tuple = (
            TFRegNetShortCut(UpperCAmelCase__ , stride=UpperCAmelCase__ , name='''shortcut''' )
            if should_apply_shortcut
            else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        snake_case : Dict = [
            TFRegNetConvLayer(UpperCAmelCase__ , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
            TFRegNetConvLayer(
                UpperCAmelCase__ , stride=UpperCAmelCase__ , groups=UpperCAmelCase__ , activation=config.hidden_act , name='''layer.1''' ),
            TFRegNetConvLayer(UpperCAmelCase__ , kernel_size=1 , activation=UpperCAmelCase__ , name='''layer.2''' ),
        ]
        snake_case : int = ACTaFN[config.hidden_act]
    def lowerCAmelCase( self : Tuple , UpperCAmelCase__ : Dict ):
        """Run the conv stack, add the shortcut residual, and apply the activation."""
        snake_case : Optional[int] = hidden_state
        for layer_module in self.layers:
            snake_case : List[Any] = layer_module(UpperCAmelCase__ )
        snake_case : Dict = self.shortcut(UpperCAmelCase__ )
        hidden_state += residual
        snake_case : Optional[int] = self.activation(UpperCAmelCase__ )
        return hidden_state
class a_ ( tf.keras.layers.Layer ):
    # RegNet "Y" residual layer: like the X layer but with a squeeze-and-excitation
    # block inserted before the last 1x1 conv.
    def __init__( self : int , UpperCAmelCase__ : RegNetConfig , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 1 , **UpperCAmelCase__ : Dict ):
        """Build the shortcut branch and the conv + SE sub-layers."""
        super().__init__(**UpperCAmelCase__ )
        snake_case : int = in_channels != out_channels or stride != 1
        snake_case : str = max(1 , out_channels // config.groups_width )
        snake_case : Union[str, Any] = (
            TFRegNetShortCut(UpperCAmelCase__ , stride=UpperCAmelCase__ , name='''shortcut''' )
            if should_apply_shortcut
            else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
        )
        snake_case : str = [
            TFRegNetConvLayer(UpperCAmelCase__ , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
            TFRegNetConvLayer(
                UpperCAmelCase__ , stride=UpperCAmelCase__ , groups=UpperCAmelCase__ , activation=config.hidden_act , name='''layer.1''' ),
            TFRegNetSELayer(UpperCAmelCase__ , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ),
            TFRegNetConvLayer(UpperCAmelCase__ , kernel_size=1 , activation=UpperCAmelCase__ , name='''layer.3''' ),
        ]
        snake_case : Tuple = ACTaFN[config.hidden_act]
    def lowerCAmelCase( self : List[str] , UpperCAmelCase__ : Optional[int] ):
        """Run the conv/SE stack, add the shortcut residual, and apply the activation."""
        snake_case : Union[str, Any] = hidden_state
        for layer_module in self.layers:
            snake_case : Any = layer_module(UpperCAmelCase__ )
        snake_case : Optional[Any] = self.shortcut(UpperCAmelCase__ )
        hidden_state += residual
        snake_case : str = self.activation(UpperCAmelCase__ )
        return hidden_state
class a_ ( tf.keras.layers.Layer ):
    # One RegNet stage: a stride-2 (downsampling) layer followed by depth-1
    # stride-1 layers; the layer class (X or Y) is chosen from the config.
    def __init__( self : Optional[Any] , UpperCAmelCase__ : RegNetConfig , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , **UpperCAmelCase__ : Optional[int] ):
        """Build the stage's layer list from the configured layer type and depth."""
        super().__init__(**UpperCAmelCase__ )
        snake_case : Dict = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer
        snake_case : Any = [
            # downsampling is done in the first layer with stride of 2
            layer(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , stride=UpperCAmelCase__ , name='''layers.0''' ),
            *[layer(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , name=F"layers.{i+1}" ) for i in range(depth - 1 )],
        ]
    def lowerCAmelCase( self : Dict , UpperCAmelCase__ : Union[str, Any] ):
        """Feed the hidden state through each layer of the stage in order."""
        for layer_module in self.layers:
            snake_case : Any = layer_module(UpperCAmelCase__ )
        return hidden_state
class a_ ( tf.keras.layers.Layer ):
    # Full RegNet encoder: sequence of stages, optionally collecting every
    # intermediate hidden state.
    def __init__( self : Optional[Any] , UpperCAmelCase__ : RegNetConfig , **UpperCAmelCase__ : str ):
        """Build all stages from the config's hidden sizes and depths."""
        super().__init__(**UpperCAmelCase__ )
        snake_case : str = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                UpperCAmelCase__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) )
        snake_case : Tuple = zip(config.hidden_sizes , config.hidden_sizes[1:] )
        for i, ((in_channels, out_channels), depth) in enumerate(zip(UpperCAmelCase__ , config.depths[1:] ) ):
            self.stages.append(TFRegNetStage(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , depth=UpperCAmelCase__ , name=F"stages.{i+1}" ) )
    def lowerCAmelCase( self : List[Any] , UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = True ):
        """Run every stage, optionally accumulating hidden states, and wrap the
        result in a model-output dataclass (or a plain tuple)."""
        snake_case : Union[str, Any] = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                snake_case : List[str] = hidden_states + (hidden_state,)
            snake_case : List[Any] = stage_module(UpperCAmelCase__ )
        if output_hidden_states:
            snake_case : List[str] = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None )
        return TFBaseModelOutputWithNoAttention(last_hidden_state=UpperCAmelCase__ , hidden_states=UpperCAmelCase__ )
@keras_serializable
class a_ ( tf.keras.layers.Layer ):
    # Serializable main layer combining embedder, encoder and a global pooler;
    # transposes outputs back to NCHW for API uniformity with the PyTorch model.
    A__ : List[str] = RegNetConfig
    def __init__( self : int , UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : List[Any] ):
        """Build embedder, encoder and the keep-dims global average pooler."""
        super().__init__(**UpperCAmelCase__ )
        snake_case : str = config
        snake_case : Tuple = TFRegNetEmbeddings(UpperCAmelCase__ , name='''embedder''' )
        snake_case : int = TFRegNetEncoder(UpperCAmelCase__ , name='''encoder''' )
        snake_case : Any = tf.keras.layers.GlobalAveragePoolingaD(keepdims=UpperCAmelCase__ , name='''pooler''' )
    @unpack_inputs
    def lowerCAmelCase( self : List[str] , UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : bool = False , ):
        """Embed, encode and pool the pixel values; return NCHW-ordered outputs."""
        snake_case : Optional[int] = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        snake_case : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
        snake_case : Optional[Any] = self.embedder(UpperCAmelCase__ , training=UpperCAmelCase__ )
        snake_case : List[Any] = self.encoder(
            UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , training=UpperCAmelCase__ )
        snake_case : int = encoder_outputs[0]
        snake_case : Tuple = self.pooler(UpperCAmelCase__ )
        # Change to NCHW output format have uniformity in the modules
        snake_case : Union[str, Any] = tf.transpose(UpperCAmelCase__ , perm=(0, 3, 1, 2) )
        snake_case : List[Any] = tf.transpose(UpperCAmelCase__ , perm=(0, 3, 1, 2) )
        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            snake_case : Tuple = tuple([tf.transpose(UpperCAmelCase__ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=UpperCAmelCase__ , pooler_output=UpperCAmelCase__ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class a_ ( a ):
    A__ : List[Any] = RegNetConfig
    A__ : List[Any] = 'regnet'
    A__ : List[Any] = 'pixel_values'
    @property
    def lowerCAmelCase( self : int ):
        """Dummy-input signature: one float32 NCHW pixel-value tensor spec (224x224)."""
        input_shape = (None, self.config.num_channels, 224, 224)
        pixel_spec = tf.TensorSpec(shape=input_shape , dtype=tf.floataa )
        return {"pixel_values": pixel_spec}
_a : Optional[Any] = R'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
_a : Union[str, Any] = R'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    'The bare RegNet model outputting raw features without any specific head on top.' , a , )
class a_ ( a ):
    # Headless TF RegNet model: thin pretrained-model wrapper around the main layer.
    def __init__( self : List[Any] , UpperCAmelCase__ : RegNetConfig , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : int ):
        """Instantiate the shared RegNet main layer."""
        super().__init__(UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__ )
        snake_case : Dict = TFRegNetMainLayer(UpperCAmelCase__ , name='''regnet''' )
    @unpack_inputs
    @add_start_docstrings_to_model_forward(UpperCAmelCase__ )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCAmelCase__ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def lowerCAmelCase( self : Tuple , UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[Any]=False , ):
        """Forward pixel values through the main layer; return tuple or dataclass."""
        snake_case : int = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        snake_case : Dict = return_dict if return_dict is not None else self.config.use_return_dict
        snake_case : List[Any] = self.regnet(
            pixel_values=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , training=UpperCAmelCase__ , )
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
    '\n    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ' , a , )
class a_ ( a , a ):
    # TF RegNet with a flatten + dense classification head on the pooled features.
    def __init__( self : Optional[int] , UpperCAmelCase__ : RegNetConfig , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Optional[Any] ):
        """Instantiate the backbone and the (flatten, dense) classifier head."""
        super().__init__(UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__ )
        snake_case : List[Any] = config.num_labels
        snake_case : List[Any] = TFRegNetMainLayer(UpperCAmelCase__ , name='''regnet''' )
        # classification head
        snake_case : Optional[Any] = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels , name='''classifier.1''' ) if config.num_labels > 0 else tf.identity,
        ]
    @unpack_inputs
    @add_start_docstrings_to_model_forward(UpperCAmelCase__ )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCAmelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def lowerCAmelCase( self : Dict , UpperCAmelCase__ : tf.Tensor = None , UpperCAmelCase__ : tf.Tensor = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : List[str]=False , ):
        """Run the backbone, classify the pooled output, and compute loss if labels given."""
        snake_case : Dict = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        snake_case : int = return_dict if return_dict is not None else self.config.use_return_dict
        snake_case : int = self.regnet(
            UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , training=UpperCAmelCase__ )
        snake_case : Union[str, Any] = outputs.pooler_output if return_dict else outputs[1]
        snake_case : Dict = self.classifier[0](UpperCAmelCase__ )
        snake_case : List[Any] = self.classifier[1](UpperCAmelCase__ )
        snake_case : Dict = None if labels is None else self.hf_compute_loss(labels=UpperCAmelCase__ , logits=UpperCAmelCase__ )
        if not return_dict:
            snake_case : List[str] = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=UpperCAmelCase__ , logits=UpperCAmelCase__ , hidden_states=outputs.hidden_states )
| 84 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_a : Optional[Any] = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
_a : str = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
_a : List[Any] = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. 
Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
    """``datasets`` metric wrapper around the reference MAUVE implementation."""

    def _info(self):
        # ``datasets.Metric`` looks this hook up by name, so it must be ``_info``.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/krishnap25/mauve",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/krishnap25/mauve"],
            reference_urls=[
                "https://arxiv.org/abs/2102.01454",
                "https://github.com/krishnap25/mauve",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1_024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        """Run MAUVE on the generated/reference texts and return its result object."""
        # Forward every option straight through to the reference implementation.
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
# (stray dataset-artifact row removed)
# ``math`` has no ``loga``; the intended function is ``log2``. Keep ``loga`` bound
# as an alias so any existing reference to that name keeps working.
from math import isqrt, log2, log2 as loga
def a_ ( max_number ) -> list[int]:
    """Return every prime strictly below ``max_number`` (sieve of Eratosthenes).

    ``max_number`` is the exclusive upper bound, e.g. ``a_(10) -> [2, 3, 5, 7]``.
    """
    is_prime = [True] * max_number
    # Sieving up to sqrt(max_number - 1) suffices: every composite below the bound
    # has at least one factor no larger than that.
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            # Start at i*i (smaller multiples were already crossed off by smaller
            # primes) and step by i — the original stepped by the bound itself.
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]
def a_ ( base = 800_800 , degree = 800_800 ) -> int:
    """Project Euler 800: count hybrid-integers p^q * q^p <= base^degree.

    Taking log2 of both sides, p^q * q^p <= base^degree exactly when
    q*log2(p) + p*log2(q) <= degree*log2(base), which lets us work with the
    (much smaller) logarithms instead of the astronomically large powers.
    """
    # Local import: the module-level ``from math import ... loga`` line is broken.
    from math import isqrt, log2

    def _primes_below(limit: int) -> list[int]:
        # Self-contained sieve of Eratosthenes (exclusive ``limit``).
        flags = [True] * limit
        for i in range(2, isqrt(limit - 1) + 1):
            if flags[i]:
                for j in range(i * i, limit, i):
                    flags[j] = False
        return [i for i in range(2, limit) if flags[i]]

    upper_bound = degree * log2(base)
    # Any prime appearing in a qualifying hybrid-integer is below the log bound.
    prime_numbers = _primes_below(int(upper_bound))

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    # Two-pointer sweep: for each ``left`` prime, shrink ``right`` until the pair
    # satisfies the bound; every prime strictly between the pointers then also
    # pairs with ``left`` (the bound is monotone in the larger prime).
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count
if __name__ == "__main__":
    # ``a_`` above is the Project Euler solver; bind the name used in the output.
    solution = a_
    print(f"{solution() = }")
# (stray dataset-artifact row removed)
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def a_ ( name ) -> str:
    """Translate an original ViT-MAE checkpoint key into its HF Transformers name.

    Each matching substring is rewritten in place on ``name``; the order of the
    checks matters (e.g. ``decoder_pos_embed`` must be handled before the generic
    ``pos_embed`` rule, and ``decoder_blocks`` before ``blocks``).
    """
    if "cls_token" in name:
        name = name.replace("cls_token", "vit.embeddings.cls_token")
    if "mask_token" in name:
        name = name.replace("mask_token", "decoder.mask_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "vit.embeddings.norm")
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight", "vit.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias", "vit.layernorm.bias")

    return name
def a_ ( orig_state_dict , config ) -> dict:
    """Re-key a raw ViT-MAE state dict in place into HF Transformers layout.

    Fused ``qkv`` tensors are split into query/key/value thirds sized from
    ``config.hidden_size`` (encoder) or ``config.decoder_hidden_size`` (decoder);
    every other key is renamed with the standard substring rewrites.
    """

    def _rename(name):
        # Same substring rewrites as the module-level rename helper; inlined so
        # this function does not depend on the degraded module-level names.
        if "cls_token" in name:
            name = name.replace("cls_token", "vit.embeddings.cls_token")
        if "mask_token" in name:
            name = name.replace("mask_token", "decoder.mask_token")
        if "decoder_pos_embed" in name:
            name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
        if "pos_embed" in name and "decoder" not in name:
            name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
        if "patch_embed.proj" in name:
            name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
        if "patch_embed.norm" in name:
            name = name.replace("patch_embed.norm", "vit.embeddings.norm")
        if "decoder_blocks" in name:
            name = name.replace("decoder_blocks", "decoder.decoder_layers")
        if "blocks" in name:
            name = name.replace("blocks", "vit.encoder.layer")
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")
        if "decoder_embed" in name:
            name = name.replace("decoder_embed", "decoder.decoder_embed")
        if "decoder_norm" in name:
            name = name.replace("decoder_norm", "decoder.decoder_norm")
        if "decoder_pred" in name:
            name = name.replace("decoder_pred", "decoder.decoder_pred")
        if "norm.weight" in name and "decoder" not in name:
            name = name.replace("norm.weight", "vit.layernorm.weight")
        if "norm.bias" in name and "decoder" not in name:
            name = name.replace("norm.bias", "vit.layernorm.bias")
        return name

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            # Keys look like "blocks.<layer>.attn.qkv.weight", so index 1 is the layer.
            key_split = key.split(".")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
            # The fused tensor stacks query, key and value along dim 0.
            if "weight" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[_rename(key)] = val

    return orig_state_dict
def a_ ( checkpoint_url , pytorch_dump_folder_path ) -> None:
    """Download a ViT-MAE checkpoint, convert it to HF format and save it.

    Side effects: downloads the checkpoint and a sample image over the network,
    verifies the converted model's logits against reference values, then writes
    the model and image processor to ``pytorch_dump_folder_path``.
    """
    config = ViTMAEConfig()
    # Scale the architecture hyper-parameters with the checkpoint size.
    if "large" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1_280
        config.intermediate_size = 5_120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    image_processor = ViTMAEImageProcessor(size=config.image_size)

    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass — the MAE forward samples a random mask, so fix the seed to
    # make the verification logits reproducible.
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]]
        )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]]
        )
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]]
        )

    # verify logits against the reference values for this checkpoint size
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--checkpoint_url',
        default='https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth',
        type=str,
        help='URL of the checkpoint you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )

    args = parser.parse_args()
    # ``a_`` above implements the converter; bind the descriptive name for the call.
    convert_vit_mae_checkpoint = a_
    convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
# (stray dataset-artifact row removed)
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
# Sample inputs used by the demo in the ``__main__`` guard below; both were
# previously bound to the same throwaway name, discarding the first tuple.
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class a_ :
    # One node of the sorted singly linked list: payload plus successor pointer.
    # The annotation is a string so it is never evaluated at class-creation time.
    data: int
    next_node: "Node | None"
class a_ :
    """Singly linked list that keeps its integer values in ascending order."""

    def __init__( self , ints ) -> None:
        """Build the list from any iterable of ints.

        Values are inserted largest-first: each one is pushed onto the head, so
        after the loop the chain reads smallest -> largest.
        """
        self.head: "Node | None" = None
        for value in sorted(ints, reverse=True):
            self.head = Node(value, self.head)

    def __iter__( self ):
        """Yield the stored values in ascending order."""
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__( self ) -> int:
        # Count by walking the chain through ``__iter__``.
        return sum(1 for _ in self)

    def __str__( self ) -> str:
        return " -> ".join([str(node) for node in self])
def a_ ( sll_one , sll_two ) -> SortedLinkedList:
    """Return a new SortedLinkedList holding every value from both input lists."""
    combined = [*sll_one, *sll_two]
    return SortedLinkedList(combined)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # ``SSL`` abbreviates the list class, matching the call below.
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
# (stray dataset-artifact row removed)
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# Batch-size constants from the upstream accelerate example (max GPU batch size /
# eval batch size). NOTE(review): both are bound to the same throwaway name
# ``_a``, so the first value is immediately overwritten — confirm intended names.
_a : Optional[Any] = 16
_a : Union[str, Any] = 32
def a_ ( accelerator , batch_size = 16 ):
    """Build train/eval DataLoaders for GLUE MRPC tokenized with bert-base-cased.

    Returns ``(train_dataloader, eval_dataloader)``. ``batch_size`` is used for
    both splits here.
    """
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=['idx', 'sentence1', 'sentence2'],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding='longest',
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors='pt',
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
# ``a_`` above implements get_dataloaders; expose the descriptive name that the
# training loop below calls.
get_dataloaders = a_

# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    # Swap in the mocked factory so CI never downloads real data.
    get_dataloaders = mocked_dataloaders  # noqa: F811
def a_ ( config , args ) -> None:
    """Train bert-base-cased on GLUE MRPC, retrying with smaller batches on OOM.

    ``config`` supplies ``lr``/``num_epochs``/``seed``/``batch_size``; ``args``
    carries the CLI flags (``cpu``, ``mixed_precision``).
    """
    # For testing only: shorten the run when the mocked-dataloader env flag is set.
    if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
        config['num_epochs'] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    metric = evaluate.load('glue', 'mrpc')

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the
        # optimizer creation otherwise training will not work on TPU.
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything: unpack in the same order the objects were passed in.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
                metric.add_batch(predictions=predictions, references=references)

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def a_ ( ) -> None:
    """CLI entry point: parse flags, fix the hyper-parameters and launch training."""
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision',
        type=str,
        default=None,
        choices=['no', 'fp16', 'bf16', 'fp8'],
        help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.',
    )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
    # ``a_`` above is the CLI entry point; bind the conventional name and run it.
    main = a_
    main()
# (stray dataset-artifact row removed)
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
_a : Union[str, Any] = logging.get_logger(__name__)
# The class methods below log through the conventional module-level name.
logger = _a
def a_ ( ) -> bool:
    """Return True when SageMaker model parallelism is configured and available.

    Inspects the SageMaker launcher environment variables and checks that the
    ``smdistributed`` package is installed; malformed JSON never raises.
    """
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv('SM_HP_MP_PARAMETERS', '{}')
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv('SM_FRAMEWORK_PARAMS', '{}')
    try:
        # Parse it and check the field "sagemaker_mpi_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get('sagemaker_mpi_enabled', False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec('smdistributed') is not None
# ``a_`` above implements this check; bind the descriptive name, which the
# training-arguments class below also refers to.
is_sagemaker_model_parallel_available = a_

if is_sagemaker_model_parallel_available():
    import smdistributed.modelparallel.torch as smp

    smp.init()
@dataclass
class a_ ( a ):
    """Deprecated TrainingArguments variant with SageMaker-aware device setup."""

    # Extra field the SageMaker launcher uses to forward model-parallel args.
    mp_parameters: str = field(
        default='',
        metadata={'help': 'Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'},
    )

    def __post_init__(self):
        # Must be named ``__post_init__`` so the dataclass machinery invokes it.
        super().__post_init__()
        warnings.warn(
            '`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use '
            '`TrainingArguments` instead.',
            FutureWarning,
        )

    @cached_property
    def _setup_devices(self) -> "torch.device":
        """Pick the torch device for this process and record ``self._n_gpu``."""
        logger.info('PyTorch: setting up devices')
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                'torch.distributed process group is initialized, but local_rank == -1. '
                'In order to use Torch DDP, launch your script with `python -m torch.distributed.launch'
            )
        if self.no_cuda:
            device = torch.device('cpu')
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device('cuda', local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend='smddp', timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv('SMDATAPARALLEL_LOCAL_RANK'))
            device = torch.device('cuda', self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend='nccl', timeout=self.ddp_timeout_delta)
            device = torch.device('cuda', self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device

    @property
    def world_size(self):
        # Under model parallelism the effective data-parallel world size comes from smp.
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()
        return super().world_size

    @property
    def place_model_on_device(self):
        # smp handles device placement itself when model parallelism is active.
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
# (stray dataset-artifact row removed)
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
# Module-level logger. NOTE(review): upstream binds this to ``logger``; here it
# sits on the throwaway name ``_a``, so nothing in this chunk actually uses it.
_a : Dict = logging.get_logger(__name__)
def a_ ( box , width , height ) -> list[int]:
    """Scale an absolute (left, top, right, bottom) pixel box onto the 0-1000
    grid LayoutLM-style models expect, given the source image ``width``/``height``."""
    return [
        int(1_000 * (box[0] / width)),
        int(1_000 * (box[1] / height)),
        int(1_000 * (box[2] / width)),
        int(1_000 * (box[3] / height)),
    ]
def a_ ( image , lang , tesseract_config = None ):
    """Run Tesseract OCR on ``image`` and return ``(words, normalized_boxes)``.

    Empty OCR tokens are dropped together with their coordinates; boxes are
    converted to (left, top, right, bottom) and normalized to the 0-1000 grid.
    """
    tesseract_config = tesseract_config if tesseract_config is not None else ''

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type='dict', config=tesseract_config)
    words, left, top, width, height = data['text'], data['left'], data['top'], data['width'], data['height']

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_boxes.append([x, y, x + w, y + h])

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class a_ ( a ):
    """Image processor for LayoutLM-style models: resizes document images, runs
    optional Tesseract OCR, and flips channels to the BGR order Detectron2 expects."""

    model_input_names = ['pixel_values']

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'height': 224, 'width': 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize ``image`` to ``size['height'] x size['width']``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size['height'], size['width'])
        # Delegates to the module-level ``resize`` helper imported above.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Prepare one or more images (and, optionally, OCR words/boxes) for the model."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.'
            )

        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, 'pytesseract')
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={'pixel_values': images}, tensor_type=return_tensors)

        if apply_ocr:
            data['words'] = words_batch
            data['boxes'] = boxes_batch
        return data
# (stray dataset-artifact row removed)
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()

# The conversion function below reads both of these module-level names.
logger = logging.get_logger(__name__)

# Map each slow tokenizer name to its fast counterpart class,
# e.g. "BertTokenizer" -> transformers.BertTokenizerFast.
TOKENIZER_CLASSES = {name: getattr(transformers, name + 'Fast') for name in SLOW_TO_FAST_CONVERTERS}
def a_ ( tokenizer_name , checkpoint_name , dump_path , force_download ) -> None:
    """Download slow tokenizer checkpoints and re-save them as fast tokenizers.

    When ``tokenizer_name``/``checkpoint_name`` are None, every known tokenizer
    class / canonical checkpoint is converted. Only the resulting
    ``tokenizer.json`` files are kept under ``dump_path``.
    """
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(F"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}." )

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + 'Fast')}

    logger.info(F"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(F"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(F"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(F"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split('/')
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    # The vocab lives in a sub-folder: nest the dump path instead of prefixing.
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(F"=> File names {file_names}")

            # Keep only the fast-tokenizer serialization.
            for file_name in file_names:
                if not file_name.endswith('tokenizer.json'):
                    os.remove(file_name)
                    logger.info(F"=> removing {file_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--dump_path', default=None, type=str, required=True, help='Path to output generated fast tokenizer files.'
    )
    parser.add_argument(
        '--tokenizer_name',
        default=None,
        type=str,
        help=(
            f"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
            'download and convert all the checkpoints from AWS.'
        ),
    )
    parser.add_argument(
        '--checkpoint_name',
        default=None,
        type=str,
        help='Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.',
    )
    parser.add_argument(
        '--force_download',
        action='store_true',
        help='Re-download checkpoints.',
    )
    args = parser.parse_args()
    # ``a_`` above implements the conversion; bind the descriptive name for the call.
    convert_slow_checkpoint_to_fast = a_
    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
# (stray dataset-artifact row removed)
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class a_ :
    """Test helper that builds a small ASTConfig plus synthetic spectrogram
    inputs for the Audio Spectrogram Transformer model tests.

    NOTE(review): this block looks machine-obfuscated — every parameter is
    named ``UpperCAmelCase__`` (duplicate argument names are a SyntaxError) and
    assignments target ``snake_case`` while later lines read the original
    attribute/local names; it needs restoring from the upstream test file.
    """

    def __init__( self : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any]=13 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : List[Any]=24 , UpperCAmelCase__ : Union[str, Any]=16 , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : str=True , UpperCAmelCase__ : int=32 , UpperCAmelCase__ : Tuple=5 , UpperCAmelCase__ : Dict=4 , UpperCAmelCase__ : Optional[int]=37 , UpperCAmelCase__ : Optional[int]="gelu" , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Optional[int]=10 , UpperCAmelCase__ : Any=0.02 , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : List[Any]=2 , UpperCAmelCase__ : Optional[Any]=2 , ):
        """Store the tester hyper-parameters and derive the patch/sequence sizes."""
        snake_case : Tuple = parent
        snake_case : Dict = batch_size
        snake_case : str = patch_size
        snake_case : Union[str, Any] = max_length
        snake_case : str = num_mel_bins
        snake_case : Any = is_training
        snake_case : Union[str, Any] = use_labels
        snake_case : Tuple = hidden_size
        snake_case : Dict = num_hidden_layers
        snake_case : Any = num_attention_heads
        snake_case : Any = intermediate_size
        snake_case : List[Any] = hidden_act
        snake_case : str = hidden_dropout_prob
        snake_case : str = attention_probs_dropout_prob
        snake_case : str = type_sequence_label_size
        snake_case : Optional[int] = initializer_range
        snake_case : str = scope
        snake_case : int = frequency_stride
        snake_case : Union[str, Any] = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        snake_case : Optional[int] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        snake_case : Any = (self.max_length - self.patch_size) // self.time_stride + 1
        snake_case : Union[str, Any] = frequency_out_dimension * time_out_dimension
        snake_case : Union[str, Any] = num_patches + 2
    def lowerCAmelCase( self : Union[str, Any] ):
        """Build (config, input_values, labels) for a test forward pass."""
        snake_case : Optional[int] = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
        snake_case : str = None
        if self.use_labels:
            snake_case : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        snake_case : List[str] = self.get_config()
        return config, input_values, labels
    def lowerCAmelCase( self : Any ):
        """Return an ASTConfig populated from this tester's hyper-parameters."""
        return ASTConfig(
            patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
    def lowerCAmelCase( self : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int] ):
        """Instantiate ASTModel in eval mode and check the hidden-state shape."""
        snake_case : str = ASTModel(config=UpperCAmelCase__ )
        model.to(UpperCAmelCase__ )
        model.eval()
        snake_case : Any = model(UpperCAmelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def lowerCAmelCase( self : Optional[Any] ):
        """Return (config, inputs_dict) in the shape the common model tests expect."""
        snake_case : int = self.prepare_config_and_inputs()
        (
            (
                snake_case
            ) , (
                snake_case
            ) , (
                snake_case
            ) ,
        ) : int = config_and_inputs
        snake_case : Tuple = {'''input_values''': input_values}
        return config, inputs_dict
@require_torch
class a_ ( a , a , unittest.TestCase ):
    """Common-model-test and pipeline-test suite for the AST architectures.

    NOTE(review): machine-obfuscated — duplicate ``UpperCAmelCase__`` parameter
    names and ``snake_case`` assignments whose original names are read later;
    needs restoring from the upstream test file before it can run.
    """

    # Model classes / pipeline mapping exercised by the shared mixins; the
    # remaining flags disable resize/embedding/head-masking/pruning tests.
    A__ : List[Any] = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    A__ : int = (
        {'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel}
        if is_torch_available()
        else {}
    )
    A__ : Optional[Any] = False
    A__ : Dict = False
    A__ : int = False
    A__ : Optional[int] = False
    def lowerCAmelCase( self : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int ):
        """Skip the audio-classification pipeline tests for this model."""
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False
    def lowerCAmelCase( self : Optional[Any] ):
        """Set up the model tester and the shared config tester."""
        snake_case : Optional[int] = ASTModelTester(self )
        snake_case : Optional[int] = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37 )
    def lowerCAmelCase( self : List[str] ):
        """Run the common ASTConfig sanity tests."""
        self.config_tester.run_common_tests()
    @unittest.skip(reason='''AST does not use inputs_embeds''' )
    def lowerCAmelCase( self : Tuple ):
        """Intentionally skipped: AST has no inputs_embeds pathway."""
        pass
    def lowerCAmelCase( self : Dict ):
        """Check the input/output embedding accessors on every model class."""
        snake_case , snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case : Optional[Any] = model_class(UpperCAmelCase__ )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            snake_case : Any = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(UpperCAmelCase__ , nn.Linear ) )
    def lowerCAmelCase( self : Dict ):
        """Verify the forward signature starts with ``input_values``."""
        snake_case , snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case : Any = model_class(UpperCAmelCase__ )
            snake_case : Any = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            snake_case : str = [*signature.parameters.keys()]
            snake_case : List[str] = ['''input_values''']
            self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )
    def lowerCAmelCase( self : Dict ):
        """Run the basic model forward-shape check."""
        snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCAmelCase__ )
    @slow
    def lowerCAmelCase( self : List[str] ):
        """Smoke-test loading a pretrained checkpoint from the hub."""
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            snake_case : List[str] = ASTModel.from_pretrained(UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
def a_ ( ) -> Dict:
    """Download a sample FLAC file from the hub and load it with torchaudio.

    Returns a ``(waveform, sampling_rate)`` pair for the integration test.
    NOTE(review): ``snake_case`` holds the downloaded path but ``torchaudio.load``
    is called with the undefined name ``__magic_name__`` — obfuscation damage.
    """
    snake_case : Dict = hf_hub_download(
        repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' )
    snake_case , snake_case : int = torchaudio.load(__magic_name__ )
    return audio, sampling_rate
@require_torch
@require_torchaudio
class a_ ( unittest.TestCase ):
    """Slow integration test: run the finetuned AudioSet checkpoint on a real
    audio clip and compare the head logits against reference values.

    NOTE(review): obfuscated — ``snake_case`` assignments shadow the names
    (``model``, ``inputs``, ``outputs`` …) that later lines read.
    """

    @cached_property
    def lowerCAmelCase( self : Any ):
        """Return the pretrained AST feature extractor (or None without torchaudio)."""
        return (
            ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' )
            if is_torchaudio_available()
            else None
        )
    @slow
    def lowerCAmelCase( self : Tuple ):
        """End-to-end check of logits shape and first three logit values."""
        snake_case : List[str] = self.default_feature_extractor
        snake_case : str = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(UpperCAmelCase__ )
        snake_case : str = self.default_feature_extractor
        snake_case , snake_case : int = prepare_audio()
        snake_case : Optional[int] = audio.squeeze().numpy()
        snake_case : Optional[Any] = feature_extractor(UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , return_tensors='''pt''' ).to(UpperCAmelCase__ )
        # forward pass
        with torch.no_grad():
            snake_case : Union[str, Any] = model(**UpperCAmelCase__ )
        # verify the logits
        snake_case : Any = torch.Size((1, 527) )
        self.assertEqual(outputs.logits.shape , UpperCAmelCase__ )
        snake_case : str = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(UpperCAmelCase__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4 ) )
| 84 | 1 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
_a : Optional[Any] = sys.version_info >= (3, 10)
# Bug fix: the version-gated dataclasses and tests below read this flag by its
# descriptive name, which was lost when the assignment was renamed to `_a`.
is_python_no_less_than_3_10 = _a
def a_ ( __magic_name__=None , __magic_name__=None ) -> Dict:
"""simple docstring"""
return field(default_factory=lambda: default , metadata=__magic_name__ )
# Fixture dataclasses exercised by HfArgumentParserTest below.
# NOTE(review): obfuscation damage — every attribute is named ``A__`` (so later
# annotations override earlier ones) and every class is named ``a_``; upstream
# these are BasicExample, WithDefaultExample, WithDefaultBoolExample, BasicEnum,
# MixedTypeEnum, the enum-holding examples, OptionalExample, ListExample,
# RequiredExample, StringLiteralAnnotationExample and the PEP-604 variants.
@dataclass
class a_ :
    # foo: int, bar: float, baz: str, flag: bool (required, no defaults)
    A__ : int
    A__ : float
    A__ : str
    A__ : bool
@dataclass
class a_ :
    # foo: int = 42, baz: str = 'toto' with help metadata
    A__ : int = 42
    A__ : str = field(default='toto' , metadata={'help': 'help message'} )
@dataclass
class a_ :
    # foo/baz booleans plus an Optional[bool] opt
    A__ : bool = False
    A__ : bool = True
    A__ : Optional[bool] = None
class a_ ( a ):
    # BasicEnum: string-valued members titi/toto
    A__ : Tuple = 'titi'
    A__ : Tuple = 'toto'
class a_ ( a ):
    # MixedTypeEnum: string members plus an int member (42)
    A__ : int = 'titi'
    A__ : Dict = 'toto'
    A__ : List[Any] = 42
@dataclass
class a_ :
    # holds a BasicEnum, coerced from its raw value in __post_init__
    A__ : BasicEnum = "toto"
    def lowerCAmelCase( self : Dict ):
        """Coerce the raw field value into a BasicEnum member."""
        snake_case : int = BasicEnum(self.foo )
@dataclass
class a_ :
    # holds a MixedTypeEnum, coerced from its raw value in __post_init__
    A__ : MixedTypeEnum = "toto"
    def lowerCAmelCase( self : Tuple ):
        """Coerce the raw field value into a MixedTypeEnum member."""
        snake_case : Optional[Any] = MixedTypeEnum(self.foo )
@dataclass
class a_ :
    # all-optional scalars and lists (OptionalExample)
    A__ : Optional[int] = None
    A__ : Optional[float] = field(default=a , metadata={'help': 'help message'} )
    A__ : Optional[str] = None
    A__ : Optional[List[str]] = list_field(default=[] )
    A__ : Optional[List[int]] = list_field(default=[] )
@dataclass
class a_ :
    # list-valued fields with list/str/float defaults (ListExample)
    A__ : List[int] = list_field(default=[] )
    A__ : List[int] = list_field(default=[1, 2, 3] )
    A__ : List[str] = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
    A__ : List[float] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class a_ :
    # required list/str/enum fields (RequiredExample)
    A__ : List[int] = field()
    A__ : str = field()
    A__ : BasicEnum = field()
    def lowerCAmelCase( self : Dict ):
        """Coerce the required enum field into a BasicEnum member."""
        snake_case : Optional[int] = BasicEnum(self.required_enum )
@dataclass
class a_ :
    # same shapes as above but with string (forward-reference) annotations
    A__ : int
    A__ : "BasicEnum" = field()
    A__ : "Optional[bool]" = None
    A__ : "str" = field(default='toto' , metadata={'help': 'help message'} )
    A__ : "List[str]" = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
if is_python_no_less_than_3_10:
    # PEP 604 (`X | None`) variants, only importable on Python >= 3.10
    @dataclass
    class a_ :
        A__ : bool = False
        A__ : bool = True
        A__ : bool | None = None
    @dataclass
    class a_ :
        A__ : int | None = None
        A__ : float | None = field(default=a , metadata={'help': 'help message'} )
        A__ : str | None = None
        A__ : list[str] | None = list_field(default=[] )
        A__ : list[int] | None = list_field(default=[] )
class a_ ( unittest.TestCase ):
    """Unit tests for HfArgumentParser: dataclass-to-argparse conversion,
    enums/literals, lists, optionals, required fields, and dict/json/yaml
    parsing entry points.

    NOTE(review): obfuscated — locals are assigned to ``snake_case`` while the
    original names (``parser``, ``expected``, ``args`` …) are read afterwards;
    restore from the upstream test file before running.
    """

    def lowerCAmelCase( self : List[str] , UpperCAmelCase__ : argparse.ArgumentParser , UpperCAmelCase__ : argparse.ArgumentParser ):
        """Assert that two ArgumentParsers define equivalent sets of actions."""
        self.assertEqual(len(a._actions ) , len(b._actions ) )
        for x, y in zip(a._actions , b._actions ):
            snake_case : Optional[int] = {k: v for k, v in vars(UpperCAmelCase__ ).items() if k != '''container'''}
            snake_case : Tuple = {k: v for k, v in vars(UpperCAmelCase__ ).items() if k != '''container'''}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get('''choices''' , UpperCAmelCase__ ) and yy.get('''choices''' , UpperCAmelCase__ ):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx['''type'''](UpperCAmelCase__ ) , yy['''type'''](UpperCAmelCase__ ) )
                del xx["type"], yy["type"]
            self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
    def lowerCAmelCase( self : List[Any] ):
        """Basic required int/float/str/bool fields round-trip correctly."""
        snake_case : List[str] = HfArgumentParser(UpperCAmelCase__ )
        snake_case : Dict = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , type=UpperCAmelCase__ , required=UpperCAmelCase__ )
        expected.add_argument('''--bar''' , type=UpperCAmelCase__ , required=UpperCAmelCase__ )
        expected.add_argument('''--baz''' , type=UpperCAmelCase__ , required=UpperCAmelCase__ )
        expected.add_argument('''--flag''' , type=UpperCAmelCase__ , default=UpperCAmelCase__ , const=UpperCAmelCase__ , nargs='''?''' )
        self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
        snake_case : Dict = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
        ((snake_case) , ) : List[str] = parser.parse_args_into_dataclasses(UpperCAmelCase__ , look_for_args_file=UpperCAmelCase__ )
        self.assertFalse(example.flag )
    def lowerCAmelCase( self : List[Any] ):
        """Fields with defaults produce optional CLI arguments."""
        snake_case : Optional[Any] = HfArgumentParser(UpperCAmelCase__ )
        snake_case : int = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , default=42 , type=UpperCAmelCase__ )
        expected.add_argument('''--baz''' , default='''toto''' , type=UpperCAmelCase__ , help='''help message''' )
        self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
    def lowerCAmelCase( self : int ):
        """Boolean fields get string-to-bool parsing plus a --no_* negation flag."""
        snake_case : Tuple = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , type=UpperCAmelCase__ , default=UpperCAmelCase__ , const=UpperCAmelCase__ , nargs='''?''' )
        expected.add_argument('''--baz''' , type=UpperCAmelCase__ , default=UpperCAmelCase__ , const=UpperCAmelCase__ , nargs='''?''' )
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument('''--no_baz''' , action='''store_false''' , default=UpperCAmelCase__ , dest='''baz''' )
        expected.add_argument('''--opt''' , type=UpperCAmelCase__ , default=UpperCAmelCase__ )
        snake_case : int = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(UpperCAmelCase__ )
        for dataclass_type in dataclass_types:
            snake_case : int = HfArgumentParser(UpperCAmelCase__ )
            self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
            snake_case : List[str] = parser.parse_args([] )
            self.assertEqual(UpperCAmelCase__ , Namespace(foo=UpperCAmelCase__ , baz=UpperCAmelCase__ , opt=UpperCAmelCase__ ) )
            snake_case : Tuple = parser.parse_args(['''--foo''', '''--no_baz'''] )
            self.assertEqual(UpperCAmelCase__ , Namespace(foo=UpperCAmelCase__ , baz=UpperCAmelCase__ , opt=UpperCAmelCase__ ) )
            snake_case : Optional[Any] = parser.parse_args(['''--foo''', '''--baz'''] )
            self.assertEqual(UpperCAmelCase__ , Namespace(foo=UpperCAmelCase__ , baz=UpperCAmelCase__ , opt=UpperCAmelCase__ ) )
            snake_case : Union[str, Any] = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
            self.assertEqual(UpperCAmelCase__ , Namespace(foo=UpperCAmelCase__ , baz=UpperCAmelCase__ , opt=UpperCAmelCase__ ) )
            snake_case : str = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
            self.assertEqual(UpperCAmelCase__ , Namespace(foo=UpperCAmelCase__ , baz=UpperCAmelCase__ , opt=UpperCAmelCase__ ) )
    def lowerCAmelCase( self : int ):
        """Enum fields become choice arguments and parse back to enum members."""
        snake_case : Optional[Any] = HfArgumentParser(UpperCAmelCase__ )
        snake_case : Optional[Any] = argparse.ArgumentParser()
        expected.add_argument(
            '''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
        self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
        snake_case : str = parser.parse_args([] )
        self.assertEqual(args.foo , '''toto''' )
        snake_case : int = parser.parse_args_into_dataclasses([] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
        snake_case : Optional[Any] = parser.parse_args(['''--foo''', '''titi'''] )
        self.assertEqual(args.foo , '''titi''' )
        snake_case : List[str] = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
        snake_case : Union[str, Any] = parser.parse_args(['''--foo''', '''42'''] )
        self.assertEqual(args.foo , 42 )
        snake_case : Optional[int] = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
    def lowerCAmelCase( self : Tuple ):
        """Literal[...] annotations behave like enum choice arguments."""
        @dataclass
        class a_ :
            A__ : Literal["titi", "toto", 42] = "toto"
        snake_case : List[Any] = HfArgumentParser(UpperCAmelCase__ )
        snake_case : Optional[int] = argparse.ArgumentParser()
        expected.add_argument(
            '''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
        self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
        snake_case : int = parser.parse_args([] )
        self.assertEqual(args.foo , '''toto''' )
        snake_case : Dict = parser.parse_args(['''--foo''', '''titi'''] )
        self.assertEqual(args.foo , '''titi''' )
        snake_case : Tuple = parser.parse_args(['''--foo''', '''42'''] )
        self.assertEqual(args.foo , 42 )
    def lowerCAmelCase( self : Optional[int] ):
        """List fields map to nargs='+' arguments with the right defaults."""
        snake_case : str = HfArgumentParser(UpperCAmelCase__ )
        snake_case : Optional[Any] = argparse.ArgumentParser()
        expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=UpperCAmelCase__ )
        expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=UpperCAmelCase__ )
        expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=UpperCAmelCase__ )
        expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=UpperCAmelCase__ )
        self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
        snake_case : Union[str, Any] = parser.parse_args([] )
        self.assertEqual(
            UpperCAmelCase__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , )
        snake_case : Optional[int] = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() )
        self.assertEqual(UpperCAmelCase__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) )
    def lowerCAmelCase( self : int ):
        """Optional[...] fields default to None and accept typed overrides."""
        snake_case : List[Any] = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , default=UpperCAmelCase__ , type=UpperCAmelCase__ )
        expected.add_argument('''--bar''' , default=UpperCAmelCase__ , type=UpperCAmelCase__ , help='''help message''' )
        expected.add_argument('''--baz''' , default=UpperCAmelCase__ , type=UpperCAmelCase__ )
        expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=UpperCAmelCase__ )
        expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=UpperCAmelCase__ )
        snake_case : List[str] = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(UpperCAmelCase__ )
        for dataclass_type in dataclass_types:
            snake_case : Dict = HfArgumentParser(UpperCAmelCase__ )
            self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
            snake_case : List[str] = parser.parse_args([] )
            self.assertEqual(UpperCAmelCase__ , Namespace(foo=UpperCAmelCase__ , bar=UpperCAmelCase__ , baz=UpperCAmelCase__ , ces=[] , des=[] ) )
            snake_case : Dict = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
            self.assertEqual(UpperCAmelCase__ , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) )
    def lowerCAmelCase( self : Optional[Any] ):
        """Fields without defaults become required CLI arguments."""
        snake_case : Optional[int] = HfArgumentParser(UpperCAmelCase__ )
        snake_case : str = argparse.ArgumentParser()
        expected.add_argument('''--required_list''' , nargs='''+''' , type=UpperCAmelCase__ , required=UpperCAmelCase__ )
        expected.add_argument('''--required_str''' , type=UpperCAmelCase__ , required=UpperCAmelCase__ )
        expected.add_argument(
            '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=UpperCAmelCase__ , )
        self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
    def lowerCAmelCase( self : Optional[int] ):
        """String (forward-reference) annotations are resolved like real types."""
        snake_case : List[str] = HfArgumentParser(UpperCAmelCase__ )
        snake_case : int = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , type=UpperCAmelCase__ , required=UpperCAmelCase__ )
        expected.add_argument(
            '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=UpperCAmelCase__ , )
        expected.add_argument('''--opt''' , type=UpperCAmelCase__ , default=UpperCAmelCase__ )
        expected.add_argument('''--baz''' , default='''toto''' , type=UpperCAmelCase__ , help='''help message''' )
        expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=UpperCAmelCase__ )
        self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
    def lowerCAmelCase( self : Union[str, Any] ):
        """parse_dict builds the dataclass directly from a mapping."""
        snake_case : Optional[Any] = HfArgumentParser(UpperCAmelCase__ )
        snake_case : Dict = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
        }
        snake_case : str = parser.parse_dict(UpperCAmelCase__ )[0]
        snake_case : Tuple = BasicExample(**UpperCAmelCase__ )
        self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
    def lowerCAmelCase( self : Optional[int] ):
        """parse_dict raises when extra keys are present and not allowed."""
        snake_case : Union[str, Any] = HfArgumentParser(UpperCAmelCase__ )
        snake_case : Optional[int] = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
            '''extra''': 42,
        }
        self.assertRaises(UpperCAmelCase__ , parser.parse_dict , UpperCAmelCase__ , allow_extra_keys=UpperCAmelCase__ )
    def lowerCAmelCase( self : Optional[Any] ):
        """A JSON file on disk parses into the dataclass."""
        snake_case : Dict = HfArgumentParser(UpperCAmelCase__ )
        snake_case : Any = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            snake_case : List[str] = os.path.join(UpperCAmelCase__ , '''temp_json''' )
            os.mkdir(UpperCAmelCase__ )
            with open(temp_local_path + '''.json''' , '''w+''' ) as f:
                json.dump(UpperCAmelCase__ , UpperCAmelCase__ )
            snake_case : Tuple = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0]
        snake_case : Dict = BasicExample(**UpperCAmelCase__ )
        self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
    def lowerCAmelCase( self : Optional[Any] ):
        """A YAML file on disk parses into the dataclass."""
        snake_case : Dict = HfArgumentParser(UpperCAmelCase__ )
        snake_case : Union[str, Any] = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            snake_case : List[str] = os.path.join(UpperCAmelCase__ , '''temp_yaml''' )
            os.mkdir(UpperCAmelCase__ )
            with open(temp_local_path + '''.yaml''' , '''w+''' ) as f:
                yaml.dump(UpperCAmelCase__ , UpperCAmelCase__ )
            snake_case : Any = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
        snake_case : int = BasicExample(**UpperCAmelCase__ )
        self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
    def lowerCAmelCase( self : int ):
        """HfArgumentParser can be constructed from TrainingArguments."""
        snake_case : Any = HfArgumentParser(UpperCAmelCase__ )
        self.assertIsNotNone(UpperCAmelCase__ )
| 84 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
# Module-level logger. Bug fix: the script body below refers to ``logger``
# (e.g. logger.warning / logger.info in main()), but the assignment had been
# renamed to ``_a`` only — bind both names.
_a : Union[str, Any] = logging.getLogger(__name__)
logger = _a
def a_ ( preds , labels ) -> float:
    """Return the fraction of positions where ``preds`` equals ``labels``.

    Both arguments are numpy arrays of equal shape.

    Bug fix: both parameters were named ``__magic_name__`` (duplicate argument
    names are a SyntaxError) while the body read ``preds`` and ``labels``; the
    return annotation was also corrected from ``Optional[int]`` to ``float``.
    """
    return (preds == labels).mean()
@dataclass
class a_ :
    """Arguments for selecting the pretrained model/config/tokenizer.

    NOTE(review): every attribute is named ``A__`` (later annotations override
    earlier ones) and ``default=a`` references an undefined sentinel —
    obfuscation damage; upstream fields are model_name_or_path, config_name,
    tokenizer_name and cache_dir.
    """

    # model_name_or_path (required)
    A__ : str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    # config_name (optional)
    A__ : Optional[str] = field(
        default=a , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    # tokenizer_name (optional)
    A__ : Optional[str] = field(
        default=a , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    # cache_dir (optional)
    A__ : Optional[str] = field(
        default=a , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class a_ :
    """Arguments for the multiple-choice task data.

    NOTE(review): attributes are all named ``A__`` (obfuscation damage);
    upstream fields are task_name, data_dir, max_seq_length, overwrite_cache.
    """

    # task_name: which registered processor to use
    A__ : str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys() )} )
    # data_dir: folder holding the task's data files
    A__ : str = field(metadata={'help': 'Should contain the data files for the task.'} )
    # max_seq_length: tokenized sequence cap (pad/truncate target)
    A__ : int = field(
        default=128 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    # overwrite_cache: rebuild cached feature files
    A__ : bool = field(
        default=a , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def a_ ( ) -> Dict:
    """Train/evaluate a multiple-choice model: parse HF arguments, set up
    logging and seeding, load config/tokenizer/model, build datasets, run the
    Trainer, and return the evaluation metrics dict.

    NOTE(review): obfuscated — locals are assigned to ``snake_case`` while the
    original names (``parser``, ``model_args``, ``trainer`` …) are read
    afterwards; restore from the upstream example script before running.
    """
    snake_case : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    snake_case , snake_case , snake_case : Tuple = parser.parse_args_into_dataclasses()
    # Refuse to clobber a non-empty output directory unless explicitly allowed.
    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            ''' --overwrite_output_dir to overcome.''' )
    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''' , __magic_name__ )
    # Set seed
    set_seed(training_args.seed )
    try:
        snake_case : int = processors[data_args.task_name]()
        snake_case : List[str] = processor.get_labels()
        snake_case : str = len(__magic_name__ )
    except KeyError:
        raise ValueError('''Task not found: %s''' % (data_args.task_name) )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    snake_case : List[Any] = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__magic_name__ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    snake_case : str = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    snake_case : Any = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__magic_name__ , cache_dir=model_args.cache_dir , )
    # Get datasets
    snake_case : Optional[int] = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=__magic_name__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    snake_case : Any = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=__magic_name__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def compute_metrics(__magic_name__ ) -> Dict:
        # argmax over choices, then accuracy against gold labels
        snake_case : str = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(__magic_name__ , p.label_ids )}
    # Data collator
    snake_case : Dict = DataCollatorWithPadding(__magic_name__ , pad_to_multiple_of=8 ) if training_args.fpaa else None
    # Initialize our Trainer
    snake_case : List[Any] = Trainer(
        model=__magic_name__ , args=__magic_name__ , train_dataset=__magic_name__ , eval_dataset=__magic_name__ , compute_metrics=__magic_name__ , data_collator=__magic_name__ , )
    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    snake_case : Optional[int] = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        snake_case : Optional[Any] = trainer.evaluate()
        snake_case : Union[str, Any] = os.path.join(training_args.output_dir , '''eval_results.txt''' )
        if trainer.is_world_master():
            with open(__magic_name__ , '''w''' ) as writer:
                logger.info('''***** Eval results *****''' )
                for key, value in result.items():
                    logger.info(''' %s = %s''' , __magic_name__ , __magic_name__ )
                    writer.write('''%s = %s\n''' % (key, value) )
                results.update(__magic_name__ )
    return results
def a_ ( __magic_name__ ) -> List[Any]:
    """TPU (xla_spawn) entry point; the index argument is required by the
    spawn API but unused here."""
    main()
if __name__ == "__main__":
    main()
| 84 | 1 |
def a_ ( __magic_name__ ) -> int:
    """Return the multiplicative persistence of ``num``: how many times the
    number must be replaced by the product of its digits before a single
    digit remains (e.g. 217 -> 14 -> 4, so 2 steps).

    Raises:
        ValueError: if the input is not an integer or is negative.

    Bug fixes: the type check called ``isinstance(num, num)`` (TypeError at
    runtime) instead of testing against ``int``, and the loop state was
    assigned to throwaway names while the real names were read.
    """
    if not isinstance(__magic_name__ , int ):
        raise ValueError('''multiplicative_persistence() only accepts integral values''' )
    if __magic_name__ < 0:
        raise ValueError('''multiplicative_persistence() does not accept negative values''' )
    steps = 0
    num_string = str(__magic_name__ )
    while len(num_string ) != 1:
        product = 1
        for digit in num_string:
            product *= int(digit )
        num_string = str(product )
        steps += 1
    return steps
def a_ ( __magic_name__ ) -> int:
    """Return the additive persistence of ``num``: how many times the number
    must be replaced by the sum of its digits before a single digit remains
    (e.g. 199 -> 19 -> 10 -> 1, so 3 steps).

    Raises:
        ValueError: if the input is not an integer or is negative.

    Bug fixes: the type check called ``isinstance(num, num)`` (TypeError at
    runtime) instead of testing against ``int``, and the loop state was
    assigned to throwaway names while the real names were read.
    """
    if not isinstance(__magic_name__ , int ):
        raise ValueError('''additive_persistence() only accepts integral values''' )
    if __magic_name__ < 0:
        raise ValueError('''additive_persistence() does not accept negative values''' )
    steps = 0
    num_string = str(__magic_name__ )
    while len(num_string ) != 1:
        total = 0
        for digit in num_string:
            total += int(digit )
        num_string = str(total )
        steps += 1
    return steps
if __name__ == "__main__":
    # Self-test: run any doctests in this module when executed as a script.
    import doctest
    doctest.testmod()
| 84 |
import re
def a_ ( __magic_name__ ) -> bool:
    """Validate a Sri Lankan mobile phone number.

    Accepted form: prefix ``0``, ``94``, ``+94`` or ``0094``, then mobile code
    ``7x`` with x in {0,1,2,4,5,6,7,8}, an optional ``-`` or space separator,
    and exactly seven further digits.

    Bug fix: the compiled pattern was discarded and ``re.search`` was called
    with the input as both pattern and subject, which accepted any digit
    string; the validation now uses the compiled pattern.
    """
    phone_pattern = re.compile(
        R'''^(?:0|94|\+94|0{2}94)''' R'''7(0|1|2|4|5|6|7|8)''' R'''(-| |)''' R'''\d{7}$''' )
    return bool(phone_pattern.search(__magic_name__ ) )
if __name__ == "__main__":
    # Demo: validate a sample number when run as a script.
    # Bug fix: the sample was stored in `_a` but passed as the undefined
    # name `phone` (NameError).
    _a : Any = '0094702343221'
    print(is_sri_lankan_phone_number(_a))
| 84 | 1 |
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
_a : Any = logging.get_logger(__name__)
class a_ ( a ):
    """Sequence feature extractor that turns raw mono audio into log-mel
    spectrogram features, with optional per-utterance mean/variance
    normalization and padding/attention-mask handling.

    NOTE(review): this block appears machine-transformed and is not runnable
    as-is: ``__init__`` and ``__call__`` declare the same parameter name
    (``UpperCAmelCase__``) multiple times — a SyntaxError — every local is
    bound to the single name ``snake_case``, and many names that are read
    (``feature_size``, ``sampling_rate``, ``win_length``, ``one_waveform``,
    ``msfc_features``, ``x``, ``input_length``, ``raw_speech``, ``features``,
    ``padded_inputs``, ``input_features``, ``attention_mask``) are never
    bound. The comments below describe the apparent intent only — confirm
    against the upstream implementation.
    """

    # Names of the model-input arrays this extractor produces.
    A__ : Union[str, Any] = ['input_features', 'attention_mask']
    def __init__( self : List[str] , UpperCAmelCase__ : List[str]=80 , UpperCAmelCase__ : str=16_000 , UpperCAmelCase__ : int=0.0 , UpperCAmelCase__ : Union[str, Any]=10 , UpperCAmelCase__ : str=25 , UpperCAmelCase__ : List[str]="hamming_window" , UpperCAmelCase__ : Union[str, Any]=3_2768.0 , UpperCAmelCase__ : List[str]=0.97 , UpperCAmelCase__ : Optional[Any]=1.0 , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Optional[Any]=False , **UpperCAmelCase__ : Optional[int] , ):
        """Store the extraction hyper-parameters and derive FFT geometry.

        Defaults suggest: feature_size=80, sampling_rate=16000,
        padding_value=0.0, hop_length=10 (ms), win_length=25 (ms),
        win_function="hamming_window", frame_signal_scale=32768.0,
        preemphasis_coeff=0.97, mel_floor=1.0, normalize_means=True,
        normalize_vars=True, return_attention_mask=False — TODO confirm.
        """
        super().__init__(feature_size=UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , padding_value=UpperCAmelCase__ , **UpperCAmelCase__ )
        snake_case : Dict = feature_size
        snake_case : Optional[Any] = sampling_rate
        snake_case : Dict = padding_value
        snake_case : str = hop_length
        snake_case : Dict = win_length
        snake_case : Optional[Any] = frame_signal_scale
        snake_case : Optional[Any] = preemphasis_coeff
        snake_case : Optional[int] = mel_floor
        snake_case : List[Any] = normalize_means
        snake_case : Optional[Any] = normalize_vars
        snake_case : List[str] = win_function
        snake_case : Any = return_attention_mask
        # Window/hop sizes converted from milliseconds to samples.
        snake_case : Tuple = win_length * sampling_rate // 1_000
        snake_case : int = hop_length * sampling_rate // 1_000
        snake_case : Optional[Any] = optimal_fft_length(self.sample_size )
        # Number of positive-frequency bins of the real FFT.
        snake_case : List[Any] = (self.n_fft // 2) + 1
    # Extract one waveform's mel-spectrogram features (frames x mel bins).
    def lowerCAmelCase( self : Tuple , UpperCAmelCase__ : np.array ):
        """Compute log-mel spectrogram features for a single waveform."""
        if self.win_function == "hamming_window":
            # Hamming windows are conventionally periodic for STFT use.
            snake_case : int = window_function(window_length=self.sample_size , name=self.win_function , periodic=UpperCAmelCase__ )
        else:
            snake_case : int = window_function(window_length=self.sample_size , name=self.win_function )
        snake_case : Tuple = mel_filter_bank(
            num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
        snake_case : Tuple = spectrogram(
            one_waveform * self.frame_signal_scale , window=UpperCAmelCase__ , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=UpperCAmelCase__ , preemphasis=self.preemphasis_coeff , mel_filters=UpperCAmelCase__ , mel_floor=self.mel_floor , log_mel='''log''' , )
        # Transpose to (num_frames, num_mel_bins).
        return msfc_features.T
    # Normalize a single feature matrix over its valid (unpadded) length.
    def lowerCAmelCase( self : str , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple ):
        """Mean/variance-normalize one example in-place style and re-pad."""
        # make sure we normalize float32 arrays
        if self.normalize_means:
            snake_case : Optional[Any] = x[:input_length].mean(axis=0 )
            snake_case : List[Any] = np.subtract(UpperCAmelCase__ , UpperCAmelCase__ )
        if self.normalize_vars:
            snake_case : List[str] = x[:input_length].std(axis=0 )
            snake_case : Dict = np.divide(UpperCAmelCase__ , UpperCAmelCase__ )
        if input_length < x.shape[0]:
            # Re-apply the padding value beyond the valid length.
            snake_case : Any = padding_value
        # make sure array is in float32
        snake_case : Union[str, Any] = x.astype(np.floataa )
        return x
    # Batch wrapper: normalize every example using its attention-mask length.
    def lowerCAmelCase( self : Dict , UpperCAmelCase__ : List[np.ndarray] , UpperCAmelCase__ : Optional[np.ndarray] = None ):
        """Normalize a batch of feature matrices, one per example."""
        snake_case : List[Any] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(UpperCAmelCase__ , UpperCAmelCase__ , self.padding_value ) for x, n in zip(UpperCAmelCase__ , UpperCAmelCase__ )]
    def __call__( self : Optional[Any] , UpperCAmelCase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCAmelCase__ : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : Optional[int] = None , **UpperCAmelCase__ : Optional[Any] , ):
        """Featurize raw speech: validate rate, batch, extract, pad, normalize."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    F" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    F" {self.sampling_rate} and not {sampling_rate}." )
        else:
            logger.warning(
                '''It is strongly recommended to pass the ``sampling_rate`` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''' )
        # A 2-D numpy input is interpreted as an already-batched array.
        snake_case : int = isinstance(UpperCAmelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F"Only mono-channel audio is supported for input to {self}" )
        snake_case : List[Any] = is_batched_numpy or (
            isinstance(UpperCAmelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            snake_case : Any = [np.asarray(UpperCAmelCase__ , dtype=np.floataa ) for speech in raw_speech]
        elif not is_batched and not isinstance(UpperCAmelCase__ , np.ndarray ):
            snake_case : str = np.asarray(UpperCAmelCase__ , dtype=np.floataa )
        elif isinstance(UpperCAmelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            snake_case : List[Any] = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            snake_case : str = [raw_speech]
        # extract fbank features
        snake_case : Union[str, Any] = [self._extract_mfsc_features(UpperCAmelCase__ ) for one_waveform in raw_speech]
        # convert into correct format for padding
        snake_case : Union[str, Any] = BatchFeature({'''input_features''': features} )
        snake_case : str = self.pad(
            UpperCAmelCase__ , padding=UpperCAmelCase__ , max_length=UpperCAmelCase__ , truncation=UpperCAmelCase__ , pad_to_multiple_of=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , **UpperCAmelCase__ , )
        # make sure list is in array format
        snake_case : str = padded_inputs.get('''input_features''' )
        if isinstance(input_features[0] , UpperCAmelCase__ ):
            snake_case : int = [np.asarray(UpperCAmelCase__ , dtype=np.floataa ) for feature in input_features]
        snake_case : int = padded_inputs.get('''attention_mask''' )
        if attention_mask is not None:
            snake_case : str = [np.asarray(UpperCAmelCase__ , dtype=np.intaa ) for array in attention_mask]
        if self.normalize_means or self.normalize_vars:
            # Only pass the mask through when padding is actually applied.
            snake_case : Tuple = (
                np.array(UpperCAmelCase__ , dtype=np.intaa )
                if self._get_padding_strategies(UpperCAmelCase__ , max_length=UpperCAmelCase__ ) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            snake_case : Optional[Any] = self.normalize(
                padded_inputs['''input_features'''] , attention_mask=UpperCAmelCase__ )
        if return_tensors is not None:
            snake_case : Optional[Any] = padded_inputs.convert_to_tensors(UpperCAmelCase__ )
        return padded_inputs
| 84 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class a_ ( unittest.TestCase ):
    """Configuration holder for LayoutLMv3 image-processor tests.

    NOTE(review): the original ``__init__`` declared the same parameter name
    (``UpperCAmelCase__``) nine times — a SyntaxError — and bound every value
    to a throwaway local instead of ``self``, so ``prepare_image_processor_dict``
    (named ``lowerCAmelCase`` here) could never work. Parameter names are
    restored from the attribute-assignment order in the body.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        # Default to an 18x18 target size when none is supplied.
        self.size = size if size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.apply_ocr = apply_ocr

    def lowerCAmelCase( self : int ):
        """Return the kwargs dict used to construct the processor under test."""
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class a_ ( a , unittest.TestCase ):
    """Unit tests for the LayoutLMv3 image processor: construction from a
    config dict, PIL/numpy/torch batching shapes, and a pytesseract OCR
    integration check.

    NOTE(review): this block appears machine-transformed and cannot run
    as-is: every test method is named ``lowerCAmelCase`` so each definition
    shadows the previous one and only the last survives;
    ``LayoutLMvaImageProcessingTester`` is undefined (the helper class above
    is named ``a_``); locals are bound to ``snake_case`` and then read under
    other names (``image_processing``, ``image_inputs``, ``encoding``,
    ``encoded_images``, ``ds``, ``image``, ``expected_words``,
    ``expected_boxes``). Comments describe the apparent intent only.
    """

    # Processor class under test; None when pytesseract is unavailable.
    A__ : List[Any] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
    # setUp: build the configuration helper.
    def lowerCAmelCase( self : Dict ):
        """Create the shared image-processing tester fixture."""
        snake_case : Optional[Any] = LayoutLMvaImageProcessingTester(self )
    @property
    def lowerCAmelCase( self : Dict ):
        """Kwargs dict used to construct the processor in each test."""
        return self.image_processor_tester.prepare_image_processor_dict()
    # Verify the constructed processor exposes the expected attributes.
    def lowerCAmelCase( self : List[Any] ):
        """Processor exposes do_resize / size / apply_ocr."""
        snake_case : List[str] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(UpperCAmelCase__ , '''do_resize''' ) )
        self.assertTrue(hasattr(UpperCAmelCase__ , '''size''' ) )
        self.assertTrue(hasattr(UpperCAmelCase__ , '''apply_ocr''' ) )
    # Verify from_dict honors both the stored size and an override.
    def lowerCAmelCase( self : Optional[int] ):
        """from_dict round-trips size, including keyword overrides."""
        snake_case : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
        snake_case : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
    def lowerCAmelCase( self : str ):
        """Intentionally skipped placeholder test."""
        pass
    def lowerCAmelCase( self : Dict ):
        """PIL inputs: single image and batch produce the expected shapes."""
        # Initialize image_processing
        snake_case : Tuple = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        snake_case : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase__ , Image.Image )
        # Test not batched input
        snake_case : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' )
        self.assertEqual(
            encoding.pixel_values.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        self.assertIsInstance(encoding.words , UpperCAmelCase__ )
        self.assertIsInstance(encoding.boxes , UpperCAmelCase__ )
        # Test batched
        snake_case : Dict = image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
    def lowerCAmelCase( self : Union[str, Any] ):
        """numpy inputs: single image and batch produce the expected shapes."""
        # Initialize image_processing
        snake_case : List[str] = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        snake_case : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase__ , np.ndarray )
        # Test not batched input
        snake_case : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        # Test batched
        snake_case : List[Any] = image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
    def lowerCAmelCase( self : Optional[Any] ):
        """torch inputs: single image and batch produce the expected shapes."""
        # Initialize image_processing
        snake_case : str = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        snake_case : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )
        # Test not batched input
        snake_case : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        # Test batched
        snake_case : Tuple = image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
    def lowerCAmelCase( self : Optional[Any] ):
        """Integration: OCR output (words/boxes) matches Tesseract 4.1.1 fixtures."""
        # with apply_OCR = True
        snake_case : int = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        snake_case : List[Any] = load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' )
        snake_case : List[Any] = Image.open(ds[0]['''file'''] ).convert('''RGB''' )
        snake_case : Any = image_processing(UpperCAmelCase__ , return_tensors='''pt''' )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
        self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        snake_case : Optional[Any] = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', 
'''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
        snake_case : Union[str, Any] = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 
447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
        # fmt: on
        self.assertListEqual(encoding.words , UpperCAmelCase__ )
        self.assertListEqual(encoding.boxes , UpperCAmelCase__ )
        # with apply_OCR = False
        snake_case : str = LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase__ )
        snake_case : Optional[Any] = image_processing(UpperCAmelCase__ , return_tensors='''pt''' )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 84 | 1 |
def a_ ( number , digit_amount ) -> float:
    """Isolate the decimal (fractional) part of *number*, keeping its sign.

    :param number: value whose fractional part is returned
    :param digit_amount: when > 0, round the fractional part to this many
        decimal digits; otherwise return it unrounded
    :return: the (optionally rounded) fractional part

    NOTE(review): the original signature declared the same parameter name
    twice (a SyntaxError); names here are restored from the body's usage.
    """
    if digit_amount > 0:
        return round(number - int(number ) , digit_amount )
    return number - int(number )
if __name__ == "__main__":
    # Demo runs of the fractional-part helper, which is named `a_` in this
    # file (`decimal_isolate` was undefined). Statements are now indented
    # under the guard; the original left them at module level (SyntaxError).
    print(a_(1.53, 0))
    print(a_(35.3_45, 1))
    print(a_(35.3_45, 2))
    print(a_(35.3_45, 3))
    print(a_(-14.7_89, 3))
    print(a_(0, 2))
    print(a_(-14.1_23, 1))
    print(a_(-14.1_23, 2))
    print(a_(-14.1_23, 3))
| 84 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_a : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
_a : Dict = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... 
prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def a_ ( height , width , scale_factor=8 ) -> tuple:
    """Round an image size up to the latent grid and map back to pixel space.

    Each latent cell covers ``scale_factor**2`` pixels; sizes that are not an
    exact multiple are rounded up so the whole image is covered.

    :param height: requested image height in pixels
    :param width: requested image width in pixels
    :param scale_factor: latent-to-pixel scale (default 8)
    :return: ``(new_height, new_width)`` in pixels

    NOTE(review): the original declared three identically named parameters
    (a SyntaxError) and annotated the tuple return as ``str``; both fixed.
    """
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class a_ ( a ):
    """Kandinsky 2.2 ControlNet decoder pipeline: denoising UNet + DDPM
    scheduler + MoVQ VAE, conditioned on image embeddings and a control hint.

    NOTE(review): this block appears machine-transformed and is not runnable
    as-is: ``__init__``, the latent-preparation method and ``__call__`` each
    declare the same parameter name (``UpperCAmelCase__``) more than once — a
    SyntaxError — every local is bound to the single name ``snake_case``, and
    names read later (``latents``, ``shape``, ``scheduler``, ``models``,
    ``hook``, ``image_embeds``, ``hint``, ``noise_pred``, ``image``, ...) are
    never defined. Comments describe the apparent intent only.
    """
    def __init__( self : Optional[int] , UpperCAmelCase__ : UNetaDConditionModel , UpperCAmelCase__ : DDPMScheduler , UpperCAmelCase__ : VQModel , ):
        """Register the UNet, DDPM scheduler and MoVQ decoder as submodules."""
        super().__init__()
        self.register_modules(
            unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ , movq=UpperCAmelCase__ , )
        # Spatial downscale factor implied by the MoVQ encoder depth.
        snake_case : List[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    # Create initial latents (or validate/scale user-supplied ones).
    def lowerCAmelCase( self : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any ):
        """Draw noise of the requested shape, or validate given latents, then
        scale by the scheduler's initial noise sigma."""
        if latents is None:
            snake_case : int = randn_tensor(UpperCAmelCase__ , generator=UpperCAmelCase__ , device=UpperCAmelCase__ , dtype=UpperCAmelCase__ )
        else:
            if latents.shape != shape:
                raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}" )
            snake_case : Optional[Any] = latents.to(UpperCAmelCase__ )
        snake_case : List[Any] = latents * scheduler.init_noise_sigma
        return latents
    # Sequential CPU offload of submodules via accelerate.cpu_offload.
    def lowerCAmelCase( self : Dict , UpperCAmelCase__ : Optional[int]=0 ):
        """Offload unet and movq to CPU, loading to GPU only when used."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('''Please install accelerate via `pip install accelerate`''' )
        snake_case : Union[str, Any] = torch.device(F"cuda:{gpu_id}" )
        snake_case : Dict = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(UpperCAmelCase__ , UpperCAmelCase__ )
    # Model-level CPU offload (hook-based, requires accelerate >= 0.17).
    def lowerCAmelCase( self : List[Any] , UpperCAmelCase__ : Any=0 ):
        """Offload whole submodels with hooks; keeps a handle to the last hook."""
        if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
        snake_case : Optional[int] = torch.device(F"cuda:{gpu_id}" )
        if self.device.type != "cpu":
            self.to('''cpu''' , silence_dtype_warnings=UpperCAmelCase__ )
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        snake_case : List[str] = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            snake_case , snake_case : Optional[int] = cpu_offload_with_hook(UpperCAmelCase__ , UpperCAmelCase__ , prev_module_hook=UpperCAmelCase__ )
        # We'll offload the last model manually.
        snake_case : Tuple = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def lowerCAmelCase( self : Union[str, Any] ):
        """Device on which the UNet actually executes (offload-aware)."""
        if not hasattr(self.unet , '''_hf_hook''' ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(UpperCAmelCase__ , '''_hf_hook''' )
                and hasattr(module._hf_hook , '''execution_device''' )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
    @torch.no_grad()
    @replace_example_docstring(UpperCAmelCase__ )
    def __call__( self : List[str] , UpperCAmelCase__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCAmelCase__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCAmelCase__ : torch.FloatTensor , UpperCAmelCase__ : int = 512 , UpperCAmelCase__ : int = 512 , UpperCAmelCase__ : int = 100 , UpperCAmelCase__ : float = 4.0 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase__ : Optional[torch.FloatTensor] = None , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , ):
        """Run the denoising loop: assemble CFG-conditioned embeddings and the
        control hint, iterate scheduler timesteps, decode latents with MoVQ,
        and return images in the requested output format."""
        snake_case : Optional[int] = self._execution_device
        snake_case : Union[str, Any] = guidance_scale > 1.0
        if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
            snake_case : Any = torch.cat(UpperCAmelCase__ , dim=0 )
        if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
            snake_case : Union[str, Any] = torch.cat(UpperCAmelCase__ , dim=0 )
        if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
            snake_case : int = torch.cat(UpperCAmelCase__ , dim=0 )
        snake_case : List[Any] = image_embeds.shape[0] * num_images_per_prompt
        if do_classifier_free_guidance:
            # Duplicate embeddings/hints for the unconditional + conditional pass.
            snake_case : Dict = image_embeds.repeat_interleave(UpperCAmelCase__ , dim=0 )
            snake_case : Optional[Any] = negative_image_embeds.repeat_interleave(UpperCAmelCase__ , dim=0 )
            snake_case : Tuple = hint.repeat_interleave(UpperCAmelCase__ , dim=0 )
            snake_case : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCAmelCase__ )
            snake_case : List[str] = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCAmelCase__ )
        self.scheduler.set_timesteps(UpperCAmelCase__ , device=UpperCAmelCase__ )
        snake_case : str = self.scheduler.timesteps
        snake_case : Optional[Any] = self.movq.config.latent_channels
        snake_case , snake_case : Optional[Any] = downscale_height_and_width(UpperCAmelCase__ , UpperCAmelCase__ , self.movq_scale_factor )
        # create initial latent
        snake_case : Dict = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , self.scheduler , )
        for i, t in enumerate(self.progress_bar(UpperCAmelCase__ ) ):
            # expand the latents if we are doing classifier free guidance
            snake_case : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            snake_case : Optional[int] = {'''image_embeds''': image_embeds, '''hint''': hint}
            snake_case : Any = self.unet(
                sample=UpperCAmelCase__ , timestep=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , added_cond_kwargs=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , )[0]
            if do_classifier_free_guidance:
                # Split the learned-variance channels, apply CFG to the noise part.
                snake_case , snake_case : Dict = noise_pred.split(latents.shape[1] , dim=1 )
                snake_case , snake_case : Any = noise_pred.chunk(2 )
                snake_case , snake_case : Dict = variance_pred.chunk(2 )
                snake_case : str = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                snake_case : List[str] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , '''variance_type''' )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                snake_case , snake_case : Tuple = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            snake_case : List[Any] = self.scheduler.step(
                UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , generator=UpperCAmelCase__ , )[0]
        # post-processing
        snake_case : List[Any] = self.movq.decode(UpperCAmelCase__ , force_not_quantize=UpperCAmelCase__ )['''sample''']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
        if output_type in ["np", "pil"]:
            # Map from [-1, 1] to [0, 1] and move to channels-last numpy.
            snake_case : Optional[Any] = image * 0.5 + 0.5
            snake_case : int = image.clamp(0 , 1 )
            snake_case : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            snake_case : str = self.numpy_to_pil(UpperCAmelCase__ )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=UpperCAmelCase__ )
| 84 | 1 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def a_ ( __magic_name__ ) -> tuple:
    """Split a dataset mapping into its ``(features, target)`` pair.

    :param __magic_name__: mapping (e.g. a scikit-learn Bunch) with
        ``"data"`` and ``"target"`` keys
    :return: ``(data, target)`` tuple
    """
    # NOTE(review): the original body read an undefined name ``data``; it now
    # reads from the actual parameter.
    return (__magic_name__["data"], __magic_name__["target"])
def a_ ( features , target , test_features ) -> np.ndarray:
    """Fit an XGBoost regressor and predict targets for the test features.

    :param features: training feature matrix
    :param target: training target values
    :param test_features: feature matrix to predict on
    :return: predictions as an ``(n, 1)`` column vector

    NOTE(review): the original signature declared the same parameter name
    three times (a SyntaxError); names are restored from the body's usage.
    """
    xgb = XGBRegressor(verbosity=0 , random_state=42 )
    xgb.fit(features , target )
    # Predict target for test data
    predictions = xgb.predict(test_features )
    # Reshape the flat prediction vector into a column.
    predictions = predictions.reshape(len(predictions ) , 1 )
    return predictions
def a_ ( ) -> None:
    """Load the California-housing dataset, train/evaluate the XGBoost model
    and print MAE / MSE.

    NOTE(review): this body is not runnable as written — ``data_handling``
    and ``xgboost`` do not exist under those names in this file (every
    function here is named ``a_`` and shadows the previous one), every local
    is bound to the single name ``snake_case``, and ``__magic_name__`` is
    read but never defined in this scope.
    """
    snake_case : List[str] = fetch_california_housing()
    snake_case , snake_case : Optional[int] = data_handling(__magic_name__ )
    # 75/25 train/test split with a fixed seed for reproducibility.
    snake_case , snake_case , snake_case , snake_case : Optional[Any] = train_test_split(
        __magic_name__ , __magic_name__ , test_size=0.25 , random_state=1 )
    snake_case : Tuple = xgboost(__magic_name__ , __magic_name__ , __magic_name__ )
    # Error printing
    print(F"Mean Absolute Error : {mean_absolute_error(__magic_name__ , __magic_name__ )}" )
    print(F"Mean Square Error : {mean_squared_error(__magic_name__ , __magic_name__ )}" )
if __name__ == "__main__":
    # Run doctests, then the entry point, which is defined as `a_` in this
    # file (`main` was undefined). Statements are now indented under the
    # guard; the original left them at module level (SyntaxError).
    import doctest

    doctest.testmod(verbose=True)
    a_()
| 84 |
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_a : List[Any] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class a_ ( a , unittest.TestCase ):
    """Test suite for the SentencePiece-based slow/fast Reformer tokenizers.

    NOTE(review): local bindings in this file are machine-mangled (every
    assignment target is ``snake_case``), so several statements reference
    names (``tokenizer``, ``vocab_keys``, ``tokenizer_r``, ``rust_tokenizer``,
    ``sequence``, ``encoded_sequence`` …) that are never bound under those
    names in the visible code. Confirm against the original test module.
    """

    # Tokenizer classes under test and TokenizerTesterMixin flags.
    A__ : Dict = ReformerTokenizer
    A__ : Optional[int] = ReformerTokenizerFast
    A__ : str = True
    A__ : Tuple = False
    A__ : str = True

    def lowerCAmelCase( self : List[Any] ):
        """Create a tokenizer from the fixture model and save it to a temp dir."""
        super().setUp()
        snake_case : str = ReformerTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ )
        tokenizer.save_pretrained(self.tmpdirname )

    def lowerCAmelCase( self : Any ):
        """Round-trip a single token through token<->id conversion."""
        snake_case : int = '''<s>'''
        snake_case : List[Any] = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ )

    def lowerCAmelCase( self : Optional[Any] ):
        """Check first/last vocab entries and the fixture vocab size (1000)."""
        snake_case : Any = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''<unk>''' )
        self.assertEqual(vocab_keys[1] , '''<s>''' )
        self.assertEqual(vocab_keys[-1] , '''j''' )
        self.assertEqual(len(UpperCAmelCase__ ) , 1_000 )

    def lowerCAmelCase( self : List[Any] ):
        """The fixture SentencePiece model has exactly 1000 pieces."""
        self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )

    def lowerCAmelCase( self : Dict ):
        """Slow and fast tokenizers must agree on tokenize/encode output."""
        if not self.test_rust_tokenizer:
            return
        snake_case : Any = self.get_tokenizer()
        snake_case : str = self.get_rust_tokenizer()
        snake_case : Tuple = '''I was born in 92000, and this is falsé.'''
        snake_case : str = tokenizer.tokenize(UpperCAmelCase__ )
        snake_case : int = rust_tokenizer.tokenize(UpperCAmelCase__ )
        self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
        snake_case : Union[str, Any] = tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
        snake_case : List[str] = rust_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
        self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
        snake_case : List[str] = self.get_rust_tokenizer()
        snake_case : Optional[int] = tokenizer.encode(UpperCAmelCase__ )
        snake_case : Optional[Any] = rust_tokenizer.encode(UpperCAmelCase__ )
        self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )

    def lowerCAmelCase( self : Dict , UpperCAmelCase__ : List[Any]=15 ):
        """Reformer has no pad token: padding='max_length' must raise everywhere."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                snake_case : str = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
                # Simple input
                snake_case : Union[str, Any] = '''This is a simple input'''
                snake_case : List[str] = ['''This is a simple input 1''', '''This is a simple input 2''']
                snake_case : int = ('''This is a simple input''', '''This is a pair''')
                snake_case : int = [
                    ('''This is a simple input 1''', '''This is a simple input 2'''),
                    ('''This is a simple pair 1''', '''This is a simple pair 2'''),
                ]
                # Simple input tests
                self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
                # Simple input
                self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
                # Simple input
                self.assertRaises(
                    UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' , )
                # Pair input
                self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
                # Pair input
                self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
                # Pair input
                self.assertRaises(
                    UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' , )

    def lowerCAmelCase( self : str ):
        """Intentionally skipped by the mixin (no-op override)."""
        pass

    def lowerCAmelCase( self : Union[str, Any] ):
        """Exercise tokenize -> ids -> tokens on accented text; unknown pieces map to <unk> (id 0)."""
        snake_case : Union[str, Any] = ReformerTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ )
        snake_case : List[str] = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [285, 46, 10, 170, 382] , )
        snake_case : int = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            UpperCAmelCase__ , [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''9''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''é''',
                '''.''',
            ] , )
        snake_case : int = tokenizer.convert_tokens_to_ids(UpperCAmelCase__ )
        self.assertListEqual(
            UpperCAmelCase__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        snake_case : List[Any] = tokenizer.convert_ids_to_tokens(UpperCAmelCase__ )
        self.assertListEqual(
            UpperCAmelCase__ , [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''<unk>''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''<unk>''',
                '''.''',
            ] , )

    @cached_property
    def lowerCAmelCase( self : Tuple ):
        """Lazily load the published crime-and-punishment Reformer tokenizer."""
        return ReformerTokenizer.from_pretrained('''google/reformer-crime-and-punishment''' )

    @slow
    def lowerCAmelCase( self : List[str] ):
        """Pinned encoding of a short sentence against the hub tokenizer."""
        snake_case : Any = '''Hello World!'''
        snake_case : Optional[Any] = [126, 32, 262, 152, 38, 72, 287]
        self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) )

    @slow
    def lowerCAmelCase( self : Optional[Any] ):
        """Pinned encoding of a long text with symbols and OOV words (-> id 0)."""
        snake_case : Optional[Any] = (
            '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
            ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
        )
        snake_case : Dict = [
            108, 265, 24, 111, 4, 258, 156, 35, 28, 275, 3, 259, 297, 260, 84, 4,
            35, 110, 44, 8, 259, 91, 268, 21, 11, 209, 274, 109, 266, 277, 117,
            86, 93, 315, 258, 278, 258, 277, 258, 0, 258, 288, 258, 319, 258, 0,
            258, 0, 258, 0, 258, 0, 258, 287, 258, 315, 258, 289, 258, 278, 99,
            269, 266, 262, 8, 259, 241, 4, 217, 230, 268, 266, 55, 168, 106, 75,
            193, 266, 223, 27, 49, 26, 282, 25, 264, 299, 19, 26, 0, 258, 277,
            117, 86, 93, 176, 183, 270, 11, 262, 42, 61, 265,
        ]
        self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) )

    @require_torch
    @slow
    def lowerCAmelCase( self : List[Any] ):
        """Smoke test: encoded output feeds a ReformerModel forward pass."""
        import torch

        from transformers import ReformerConfig, ReformerModel

        # Build sequence
        snake_case : Any = list(self.big_tokenizer.get_vocab().keys() )[:10]
        snake_case : Union[str, Any] = ''' '''.join(UpperCAmelCase__ )
        snake_case : Optional[int] = self.big_tokenizer.encode_plus(UpperCAmelCase__ , return_tensors='''pt''' )
        snake_case : List[str] = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors='''pt''' )
        snake_case : Optional[Any] = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        snake_case : Tuple = encoded_sequence['''input_ids'''].shape
        snake_case : List[Any] = ReformerModel(UpperCAmelCase__ )
        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**UpperCAmelCase__ )
            model(**UpperCAmelCase__ )

    @slow
    def lowerCAmelCase( self : Optional[int] ):
        """Integration check against a pinned hub revision and expected encoding."""
        # fmt: off
        snake_case : Tuple = {'''input_ids''': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on
        # This tokenizer does not know some characters like ")".
        # That is the reason why we use very simple texts here.
        # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        snake_case : Tuple = [
            '''This is a very simple sentence.''',
            '''The quick brown fox jumps over the lazy dog.''',
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase__ , model_name='''google/reformer-crime-and-punishment''' , revision='''0e6c3decb8211d49bf881013425dc8b0448b3f5a''' , padding=UpperCAmelCase__ , sequences=UpperCAmelCase__ , )
| 84 | 1 |
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
# NOTE(review): each constant below is bound to the same mangled name ``_a``
# (later assignments shadow earlier ones); the class below references
# VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, which are never defined under those
# names here — confirm against the original module.
_a : Optional[int] = logging.get_logger(__name__)

# Expected on-disk file names for a saved BlenderbotSmall tokenizer.
_a : Any = {
    'vocab_file': 'vocab.json',
    'merges_file': 'merges.txt',
    'tokenizer_config_file': 'tokenizer_config.json',
}

# Hub URLs for the pretrained 90M checkpoint's tokenizer files.
_a : int = {
    'vocab_file': {
        'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
    },
    'merges_file': {
        'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
    },
    'tokenizer_config_file': {
        'facebook/blenderbot_small-90M': (
            'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
        )
    },
}

# Maximum model input length (positional embeddings) per checkpoint.
_a : List[Any] = {
    'facebook/blenderbot_small-90M': 512,
}
class a_ ( a ):
    """Fast (byte-level BPE) tokenizer for BlenderbotSmall.

    Bug fixes relative to the original chunk:
    - ``__init__`` and both methods declared several parameters under the same
      name (``UpperCAmelCase__``), which is a SyntaxError; they now have
      distinct names.
    - ``build_inputs_with_special_tokens`` returned an undefined ``output``;
      the special-token sequence is now bound before use.
    """

    A__ : int = VOCAB_FILES_NAMES
    A__ : Dict = PRETRAINED_VOCAB_FILES_MAP
    A__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    A__ : List[Any] = BlenderbotSmallTokenizer

    def __init__( self : List[Any] , vocab_file=None , merges_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        """Build the backing ByteLevelBPETokenizer and register special tokens."""
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file , merges=merges_file , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , ) , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , **kwargs , )
        self.add_prefix_space = add_prefix_space

    def lowerCAmelCase( self : Tuple , token_ids_a , token_ids_b=None ):
        """Wrap one or two sequences with BOS/EOS special tokens."""
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return output
        # Pair: <bos> A <eos> <eos> B <eos>
        return output + [self.eos_token_id] + token_ids_b + [self.eos_token_id]

    def lowerCAmelCase( self : List[str] , token_ids_a: List[int] , token_ids_b: Optional[List[int]] = None ):
        """Return all-zero token type ids (Blenderbot does not use segments)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
| 84 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def a_ ( __magic_name__ ) -> Tuple:
    """Preprocess a PIL image for the super-resolution pipeline.

    Resizes each side down to the nearest multiple of 32, scales pixel values
    to [0, 1], converts to an NCHW float tensor and rescales to [-1, 1].

    Args:
        __magic_name__: a ``PIL.Image.Image``.

    Returns:
        ``torch.Tensor`` of shape (1, 3, H, W) with values in [-1, 1].
    """
    # Bug fix: the original body read an undefined name ``image`` — bind the
    # parameter to it so the function actually uses its input.
    image = __magic_name__
    w, h = image.size
    # Round both sides down to an integer multiple of 32 (UNet requirement).
    w, h = (x - x % 32 for x in (w, h))
    image = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] )
    # Bug fix: ``np.floataa`` does not exist; the intended dtype is float32.
    image = np.array(image ).astype(np.float32 ) / 255.0
    # HWC -> NCHW with a leading batch dimension of 1.
    image = image[None].transpose(0 , 3 , 1 , 2 )
    image = torch.from_numpy(image )
    # Rescale [0, 1] -> [-1, 1].
    return 2.0 * image - 1.0
class a_ ( a ):
    """Latent super-resolution diffusion pipeline (VQ-VAE decoder + UNet + scheduler).

    NOTE(review): local bindings below are machine-mangled (every assignment
    target is ``snake_case``) and ``__init__``/``__call__`` declare several
    parameters under the same mangled name, so many statements reference names
    (``image``, ``batch_size``, ``height``, ``width``, ``latents``,
    ``accepts_eta``, ``eta``, ``output_type``, ``return_dict``) that are not
    bound under those names in the visible code. Confirm against the original
    pipeline module.
    """

    def __init__( self : Optional[Any] , UpperCAmelCase__ : VQModel , UpperCAmelCase__ : UNetaDModel , UpperCAmelCase__ : Union[
        DDIMScheduler,
        PNDMScheduler,
        LMSDiscreteScheduler,
        EulerDiscreteScheduler,
        EulerAncestralDiscreteScheduler,
        DPMSolverMultistepScheduler,
    ] , ):
        """Register the VQ-VAE, UNet and scheduler sub-modules on the pipeline."""
        super().__init__()
        self.register_modules(vqvae=UpperCAmelCase__ , unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ )

    @torch.no_grad()
    def __call__( self : Any , UpperCAmelCase__ : Union[torch.Tensor, PIL.Image.Image] = None , UpperCAmelCase__ : Optional[int] = 1 , UpperCAmelCase__ : Optional[int] = 100 , UpperCAmelCase__ : Optional[float] = 0.0 , UpperCAmelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , ):
        """Run the denoising loop on a low-resolution image and decode the result.

        Accepts a PIL image or an image tensor; returns an
        ``ImagePipelineOutput`` (or a one-tuple when ``return_dict`` is False).
        """
        # Determine the batch size from the input type.
        if isinstance(UpperCAmelCase__ , PIL.Image.Image ):
            snake_case : Optional[int] = 1
        elif isinstance(UpperCAmelCase__ , torch.Tensor ):
            snake_case : Any = image.shape[0]
        else:
            raise ValueError(F"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(UpperCAmelCase__ )}" )
        if isinstance(UpperCAmelCase__ , PIL.Image.Image ):
            snake_case : Optional[Any] = preprocess(UpperCAmelCase__ )
        snake_case , snake_case : Union[str, Any] = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        snake_case : List[Any] = (batch_size, self.unet.config.in_channels // 2, height, width)
        snake_case : str = next(self.unet.parameters() ).dtype
        # Draw the initial latent noise on the pipeline device.
        snake_case : Dict = randn_tensor(UpperCAmelCase__ , generator=UpperCAmelCase__ , device=self.device , dtype=UpperCAmelCase__ )
        snake_case : Any = image.to(device=self.device , dtype=UpperCAmelCase__ )
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(UpperCAmelCase__ , device=self.device )
        snake_case : Optional[Any] = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        snake_case : Union[str, Any] = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        snake_case : Any = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        snake_case : Optional[Any] = {}
        if accepts_eta:
            snake_case : Dict = eta
        for t in self.progress_bar(UpperCAmelCase__ ):
            # concat latents and low resolution image in the channel dimension.
            snake_case : Optional[int] = torch.cat([latents, image] , dim=1 )
            snake_case : str = self.scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ )
            # predict the noise residual
            snake_case : int = self.unet(UpperCAmelCase__ , UpperCAmelCase__ ).sample
            # compute the previous noisy sample x_t -> x_t-1
            snake_case : Any = self.scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
        # decode the image latents with the VQVAE
        snake_case : Optional[int] = self.vqvae.decode(UpperCAmelCase__ ).sample
        snake_case : int = torch.clamp(UpperCAmelCase__ , -1.0 , 1.0 )
        # Rescale from [-1, 1] to [0, 1] and move to NHWC numpy for PIL export.
        snake_case : Dict = image / 2 + 0.5
        snake_case : int = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            snake_case : Any = self.numpy_to_pil(UpperCAmelCase__ )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=UpperCAmelCase__ )
| 84 | 1 |
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class a_ :
    """Histogram-based contrast stretch of a grayscale image (OpenCV + matplotlib).

    NOTE(review): local bindings are machine-mangled; the methods reference
    attributes/names (``self.img``, ``self.k``, ``self.sk``, ``self.L``,
    ``self.rem``, ``self.last_list``, ``self.number_of_cols``,
    ``self.number_of_rows``, ``x``, ``prk``, ``last``, ``num``) that are never
    bound under those names in the visible code. Confirm against the original
    script before relying on any behavior described here.
    """

    def __init__( self : Union[str, Any] ):
        # Image buffers, remapping table, and histogram-equalization state.
        snake_case : Dict = ''''''
        snake_case : Optional[Any] = ''''''
        snake_case : List[str] = []
        snake_case : Dict = 0
        snake_case : int = 256  # number of gray levels (L)
        snake_case : int = 0
        snake_case : Optional[Any] = 0
        snake_case : Dict = 0
        snake_case : int = 0

    def lowerCAmelCase( self : int , UpperCAmelCase__ : Dict ):
        """Load an image, build its histogram CDF remap table, apply it, save output."""
        snake_case : Tuple = cva.imread(UpperCAmelCase__ , 0 )
        snake_case : str = copy.deepcopy(self.img )
        # Histogram of pixel intensities over [0, 256).
        snake_case , snake_case , snake_case : Optional[int] = plt.hist(self.img.ravel() , 256 , [0, 256] , label='''x''' )
        snake_case : int = np.sum(UpperCAmelCase__ )
        for i in range(len(UpperCAmelCase__ ) ):
            snake_case : List[str] = x[i] / self.k
            self.sk += prk
            snake_case : int = (self.L - 1) * self.sk
            if self.rem != 0:
                # NOTE(review): ``last % last`` is always 0 (or NaN for 0); this
                # looks like a transcription error — likely ``last % 1`` or
                # similar was intended. Confirm against the original.
                snake_case : List[Any] = int(last % last )
            # Round the scaled CDF value to the nearest integer level.
            snake_case : str = int(last + 1 if self.rem >= 0.5 else last )
            self.last_list.append(UpperCAmelCase__ )
        snake_case : int = int(np.ma.count(self.img ) / self.img[1].size )
        snake_case : Optional[Any] = self.img[1].size
        # Remap every pixel through the lookup table built above.
        for i in range(self.number_of_cols ):
            for j in range(self.number_of_rows ):
                snake_case : Any = self.img[j][i]
                if num != self.last_list[num]:
                    snake_case : Optional[Any] = self.last_list[num]
        cva.imwrite('''output_data/output.jpg''' , self.img )

    def lowerCAmelCase( self : Union[str, Any] ):
        """Plot the histogram of the (processed) image."""
        plt.hist(self.img.ravel() , 256 , [0, 256] )

    def lowerCAmelCase( self : Any ):
        """Display input and output images for 5 seconds, then close windows."""
        cva.imshow('''Output-Image''' , self.img )
        cva.imshow('''Input-Image''' , self.original_image )
        cva.waitKey(5_000 )
        cva.destroyAllWindows()
if __name__ == "__main__":
    # NOTE(review): ``os.path.basename(__file__)`` yields the script's file
    # *name*, so this joins to "<script>.py/image_data/input.jpg";
    # ``os.path.dirname`` was almost certainly intended.
    _a : Any = os.path.join(os.path.basename(__file__), 'image_data/input.jpg')
    # NOTE(review): the class above is named ``a_`` and all of its methods are
    # named ``lowerCAmelCase`` — ``ConstantStretch``, ``stretcher``,
    # ``file_path``, ``.stretch``, ``.plot_histogram`` and ``.show_image`` are
    # undefined here. Confirm the intended names against the original script.
    _a : int = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
| 84 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class a_ ( a ):
    """Unit tests for ``RealmRetriever`` (tokenization + block-record retrieval).

    NOTE(review): local bindings are machine-mangled (every assignment target
    is ``snake_case``), so several statements reference names
    (``vocab_tokens``, ``self.vocab_file``, ``retriever``, ``tokenizer``,
    ``concat_inputs`` …) that are never bound under those names in the visible
    code. Confirm against the original test module.
    """

    def lowerCAmelCase( self : List[Any] ):
        """Create a temp dir with a toy WordPiece vocab and a block-records dir."""
        snake_case : List[Any] = tempfile.mkdtemp()
        snake_case : Dict = 5
        # Realm tok
        snake_case : str = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''test''',
            '''question''',
            '''this''',
            '''is''',
            '''the''',
            '''first''',
            '''second''',
            '''third''',
            '''fourth''',
            '''fifth''',
            '''record''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        snake_case : Tuple = os.path.join(self.tmpdirname , '''realm_tokenizer''' )
        os.makedirs(UpperCAmelCase__ , exist_ok=UpperCAmelCase__ )
        snake_case : Any = os.path.join(UpperCAmelCase__ , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        snake_case : Tuple = os.path.join(self.tmpdirname , '''realm_block_records''' )
        os.makedirs(UpperCAmelCase__ , exist_ok=UpperCAmelCase__ )

    def lowerCAmelCase( self : List[Any] ):
        """Load the RealmTokenizer saved in setUp's temp dir."""
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer''' ) )

    def lowerCAmelCase( self : Any ):
        """Remove the temp dir created in setUp."""
        shutil.rmtree(self.tmpdirname )

    def lowerCAmelCase( self : Optional[int] ):
        """Build a RealmConfig matching the dummy number of block records."""
        snake_case : Any = RealmConfig(num_block_records=self.num_block_records )
        return config

    def lowerCAmelCase( self : int ):
        """A tiny two-row QA dataset used as a stand-in corpus."""
        snake_case : Optional[int] = Dataset.from_dict(
            {
                '''id''': ['''0''', '''1'''],
                '''question''': ['''foo''', '''bar'''],
                '''answers''': [['''Foo''', '''Bar'''], ['''Bar''']],
            } )
        return dataset

    def lowerCAmelCase( self : str ):
        """Six byte-string evidence blocks, including one longer outlier."""
        snake_case : Dict = np.array(
            [
                b'''This is the first record''',
                b'''This is the second record''',
                b'''This is the third record''',
                b'''This is the fourth record''',
                b'''This is the fifth record''',
                b'''This is a longer longer longer record''',
            ] , dtype=UpperCAmelCase__ , )
        return block_records

    def lowerCAmelCase( self : Optional[Any] ):
        """Assemble a RealmRetriever from the dummy blocks and tokenizer."""
        snake_case : Tuple = RealmRetriever(
            block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
        return retriever

    def lowerCAmelCase( self : Optional[Any] ):
        """Retrieval concatenates [CLS] question [SEP] block [SEP] per retrieved id."""
        snake_case : List[str] = self.get_config()
        snake_case : Optional[Any] = self.get_dummy_retriever()
        snake_case : Optional[int] = retriever.tokenizer
        snake_case : Dict = np.array([0, 3] , dtype='''long''' )
        snake_case : Optional[int] = tokenizer(['''Test question'''] ).input_ids
        snake_case : Union[str, Any] = tokenizer(
            ['''the fourth'''] , add_special_tokens=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , ).input_ids
        snake_case : Optional[Any] = config.reader_seq_len
        snake_case , snake_case , snake_case , snake_case : List[str] = retriever(
            UpperCAmelCase__ , UpperCAmelCase__ , answer_ids=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors='''np''' )
        self.assertEqual(len(UpperCAmelCase__ ) , 2 )
        self.assertEqual(len(UpperCAmelCase__ ) , 2 )
        self.assertEqual(len(UpperCAmelCase__ ) , 2 )
        self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
        self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
        self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
        self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , )

    def lowerCAmelCase( self : Optional[Any] ):
        """Answer spans are located inside retrieved blocks (or marked absent)."""
        snake_case : List[Any] = self.get_config()
        snake_case : Optional[int] = self.get_dummy_retriever()
        snake_case : List[str] = retriever.tokenizer
        snake_case : Optional[Any] = np.array([0, 3, 5] , dtype='''long''' )
        snake_case : Optional[int] = tokenizer(['''Test question'''] ).input_ids
        snake_case : Any = tokenizer(
            ['''the fourth''', '''longer longer'''] , add_special_tokens=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , ).input_ids
        snake_case : List[Any] = config.reader_seq_len
        snake_case , snake_case , snake_case , snake_case : Union[str, Any] = retriever(
            UpperCAmelCase__ , UpperCAmelCase__ , answer_ids=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors='''np''' )
        # has_answers flags, then start/end token positions per block.
        self.assertEqual([False, True, True] , UpperCAmelCase__ )
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , UpperCAmelCase__ )
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , UpperCAmelCase__ )

    def lowerCAmelCase( self : Optional[int] ):
        """save_pretrained/from_pretrained round-trips block records (local + mocked hub)."""
        snake_case : int = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
        # Test local path
        snake_case : Optional[Any] = retriever.from_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
        self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
        # Test mocked remote path
        with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''' ) as mock_hf_hub_download:
            snake_case : Any = os.path.join(
                os.path.join(self.tmpdirname , '''realm_block_records''' ) , _REALM_BLOCK_RECORDS_FILENAME )
            snake_case : Any = RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''' )
        self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
| 84 | 1 |
def a_ ( __magic_name__ = 50 ) -> int:
    """Project Euler 116: count ways to replace black tiles in a row of the
    given length with red (2), green (3) or blue (4) tiles, one colour per
    arrangement, using at least one coloured tile.

    Bug fix: the original body read an undefined global ``length``; it now
    uses the parameter.

    Args:
        __magic_name__: row length (default 50, per the problem statement).

    Returns:
        int: total number of arrangements across the three colours.
    """
    length = __magic_name__
    # ways[n][c] = arrangements of a row of length n using colour c (tile
    # length c + 2) with at least one coloured tile.
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1 )]
    for row_length in range(length + 1 ):
        for tile_length in range(2 , 5 ):
            for tile_start in range(row_length - tile_length + 1 ):
                # Place the first coloured tile at tile_start; the remainder of
                # the row is either all black (+1) or another valid arrangement.
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )
    return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
    # Bug fix: ``solution`` is not defined in this module; the solver function
    # is bound to ``a_``.
    print(f"{a_() = }")
| 84 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

# Lazy-import structure: submodule name -> list of public symbols.
# Bug fix: the original chunk rebound each piece to ``_a`` and then passed an
# undefined ``_import_structure`` to ``_LazyModule`` (NameError at import
# time); the dict is now built and extended properly.
_import_structure = {'configuration_encoder_decoder': ['EncoderDecoderConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # PyTorch implementation is only exposed when torch is installed.
    _import_structure['modeling_encoder_decoder'] = ['EncoderDecoderModel']

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_encoder_decoder'] = ['TFEncoderDecoderModel']

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_flax_encoder_decoder'] = ['FlaxEncoderDecoderModel']

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_encoder_decoder import EncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encoder_decoder import EncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_encoder_decoder import TFEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
    import sys

    # Keep the module-level binding the original assigned; the lazy proxy now
    # receives a defined ``_import_structure``.
    _a : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 84 | 1 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class a_ ( a ):
    """Output container for the Flax ControlNet model.

    NOTE(review): both fields are annotated under the same mangled name
    ``A__`` — in a dataclass the second annotation replaces the first, so only
    one field survives here. The original most likely declared two distinct
    arrays (down-block residual samples and the mid-block sample); confirm.
    """

    A__ : jnp.ndarray
    A__ : jnp.ndarray
class a_ ( nn.Module ):
    """Flax module embedding a conditioning image into the ControlNet feature space.

    A conv-in, a ladder of stride-1 + stride-2 conv pairs over
    ``block_out_channels``, and a zero-initialised conv-out.

    NOTE(review): local bindings are machine-mangled — several ``nn.Conv``
    calls receive ``UpperCAmelCase__`` and ``blocks.append`` is called on a
    name that is never bound, so the setup as written would fail; confirm
    against the original module.
    """

    # Output channel count for the final embedding conv.
    A__ : int
    # Channel widths of the downsampling ladder.
    A__ : Tuple[int] = (16, 32, 96, 256)
    A__ : jnp.dtype = jnp.floataa

    def lowerCAmelCase( self : List[Any] ):
        """Build conv_in, the downsampling blocks, and the zero-init conv_out."""
        snake_case : List[str] = nn.Conv(
            self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        snake_case : Union[str, Any] = []
        for i in range(len(self.block_out_channels ) - 1 ):
            snake_case : Any = self.block_out_channels[i]
            snake_case : List[Any] = self.block_out_channels[i + 1]
            # Stride-1 conv keeping the current width...
            snake_case : Any = nn.Conv(
                UpperCAmelCase__ , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
            blocks.append(UpperCAmelCase__ )
            # ...followed by a stride-2 conv moving to the next width.
            snake_case : Optional[Any] = nn.Conv(
                UpperCAmelCase__ , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
            blocks.append(UpperCAmelCase__ )
        snake_case : Union[str, Any] = blocks
        # Zero-initialised so the embedding starts as an identity-like no-op.
        snake_case : Any = nn.Conv(
            self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )

    def __call__( self : int , UpperCAmelCase__ : Union[str, Any] ):
        """Apply conv_in, the SiLU-separated block ladder, then conv_out."""
        snake_case : Optional[int] = self.conv_in(UpperCAmelCase__ )
        snake_case : Dict = nn.silu(UpperCAmelCase__ )
        for block in self.blocks:
            snake_case : Any = block(UpperCAmelCase__ )
            snake_case : Dict = nn.silu(UpperCAmelCase__ )
        snake_case : List[str] = self.conv_out(UpperCAmelCase__ )
        return embedding
@flax_register_to_config
class a_ ( nn.Module , a , a ):
A__ : int = 32
A__ : int = 4
A__ : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
A__ : Union[bool, Tuple[bool]] = False
A__ : Tuple[int] = (320, 640, 1280, 1280)
A__ : int = 2
A__ : Union[int, Tuple[int]] = 8
A__ : Optional[Union[int, Tuple[int]]] = None
A__ : int = 1280
A__ : float = 0.0
A__ : bool = False
A__ : jnp.dtype = jnp.floataa
A__ : bool = True
A__ : int = 0
A__ : str = "rgb"
A__ : Tuple[int] = (16, 32, 96, 256)
def lowerCAmelCase( self : Tuple , UpperCAmelCase__ : jax.random.KeyArray ):
"""simple docstring"""
# init input tensors
snake_case : Optional[Any] = (1, self.in_channels, self.sample_size, self.sample_size)
snake_case : List[str] = jnp.zeros(UpperCAmelCase__ , dtype=jnp.floataa )
snake_case : Tuple = jnp.ones((1,) , dtype=jnp.intaa )
snake_case : Union[str, Any] = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
snake_case : Any = (1, 3, self.sample_size * 8, self.sample_size * 8)
snake_case : Tuple = jnp.zeros(UpperCAmelCase__ , dtype=jnp.floataa )
snake_case , snake_case : Union[str, Any] = jax.random.split(UpperCAmelCase__ )
snake_case : Any = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )["params"]
def lowerCAmelCase( self : str ):
"""simple docstring"""
snake_case : Optional[Any] = self.block_out_channels
snake_case : int = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
snake_case : int = self.num_attention_heads or self.attention_head_dim
# input
snake_case : Any = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
snake_case : Dict = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
snake_case : int = FlaxTimestepEmbedding(UpperCAmelCase__ , dtype=self.dtype )
snake_case : List[Any] = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
snake_case : Any = self.only_cross_attention
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
snake_case : Union[str, Any] = (only_cross_attention,) * len(self.down_block_types )
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
snake_case : Optional[int] = (num_attention_heads,) * len(self.down_block_types )
# down
snake_case : Any = []
snake_case : List[str] = []
snake_case : Any = block_out_channels[0]
snake_case : int = nn.Conv(
UpperCAmelCase__ , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(UpperCAmelCase__ )
for i, down_block_type in enumerate(self.down_block_types ):
snake_case : Union[str, Any] = output_channel
snake_case : Any = block_out_channels[i]
snake_case : Dict = i == len(UpperCAmelCase__ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
snake_case : Tuple = FlaxCrossAttnDownBlockaD(
in_channels=UpperCAmelCase__ , out_channels=UpperCAmelCase__ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
snake_case : int = FlaxDownBlockaD(
in_channels=UpperCAmelCase__ , out_channels=UpperCAmelCase__ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(UpperCAmelCase__ )
for _ in range(self.layers_per_block ):
snake_case : Dict = nn.Conv(
UpperCAmelCase__ , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(UpperCAmelCase__ )
if not is_final_block:
snake_case : Optional[int] = nn.Conv(
UpperCAmelCase__ , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(UpperCAmelCase__ )
snake_case : str = down_blocks
snake_case : List[str] = controlnet_down_blocks
# mid
snake_case : str = block_out_channels[-1]
snake_case : str = FlaxUNetMidBlockaDCrossAttn(
in_channels=UpperCAmelCase__ , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
snake_case : Tuple = nn.Conv(
UpperCAmelCase__ , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
    def __call__( self : Tuple , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : float = 1.0 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : bool = False , ):
        """Forward pass of the Flax ControlNet: embed time + conditioning image,
        run the UNet down/mid blocks, and return per-resolution residuals scaled
        by the conditioning scale.

        NOTE(review): this code has been name-mangled. All seven parameters are
        declared as ``UpperCAmelCase__`` (duplicate argument names -- a
        SyntaxError), while the body reads ``channel_order``, ``timesteps``,
        ``sample``, ``controlnet_cond``, ``train``, ``conditioning_scale`` and
        ``return_dict``, and every assignment target was rewritten to
        ``snake_case``. The original names must be restored before this can run;
        the comments below describe the evident intent only.
        """
        snake_case : Tuple = self.controlnet_conditioning_channel_order
        # Flip the channel axis of the conditioning image when the checkpoint
        # expects BGR instead of RGB.
        if channel_order == "bgr":
            snake_case : Dict = jnp.flip(UpperCAmelCase__ , axis=1 )
        # 1. time: promote a scalar / 0-d timestep to a 1-d array, then embed it
        if not isinstance(UpperCAmelCase__ , jnp.ndarray ):
            snake_case : Optional[int] = jnp.array([timesteps] , dtype=jnp.intaa )
        elif isinstance(UpperCAmelCase__ , jnp.ndarray ) and len(timesteps.shape ) == 0:
            snake_case : Union[str, Any] = timesteps.astype(dtype=jnp.floataa )
            snake_case : List[Any] = jnp.expand_dims(UpperCAmelCase__ , 0 )
        snake_case : Optional[Any] = self.time_proj(UpperCAmelCase__ )
        snake_case : Union[str, Any] = self.time_embedding(UpperCAmelCase__ )
        # 2. pre-process: transpose to NHWC (Flax convolution layout) and embed
        snake_case : Union[str, Any] = jnp.transpose(UpperCAmelCase__ , (0, 2, 3, 1) )
        snake_case : Optional[Any] = self.conv_in(UpperCAmelCase__ )
        snake_case : List[Any] = jnp.transpose(UpperCAmelCase__ , (0, 2, 3, 1) )
        snake_case : Optional[int] = self.controlnet_cond_embedding(UpperCAmelCase__ )
        # Add the embedded conditioning signal onto the latent sample.
        sample += controlnet_cond
        # 3. down: run the down blocks, accumulating the residuals the
        # ControlNet taps feed on
        snake_case : Optional[Any] = (sample,)
        for down_block in self.down_blocks:
            if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
                snake_case , snake_case : Optional[int] = down_block(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , deterministic=not train )
            else:
                snake_case , snake_case : Union[str, Any] = down_block(UpperCAmelCase__ , UpperCAmelCase__ , deterministic=not train )
            down_block_res_samples += res_samples
        # 4. mid
        snake_case : Dict = self.mid_block(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , deterministic=not train )
        # 5. contronet blocks: apply the zero-initialized 1x1 convs to each residual
        snake_case : List[str] = ()
        for down_block_res_sample, controlnet_block in zip(UpperCAmelCase__ , self.controlnet_down_blocks ):
            snake_case : Union[str, Any] = controlnet_block(UpperCAmelCase__ )
            controlnet_down_block_res_samples += (down_block_res_sample,)
        snake_case : Optional[Any] = controlnet_down_block_res_samples
        snake_case : Dict = self.controlnet_mid_block(UpperCAmelCase__ )
        # 6. scaling: weight every residual by the conditioning scale
        snake_case : Tuple = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale
        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)
        return FlaxControlNetOutput(
            down_block_res_samples=UpperCAmelCase__ , mid_block_res_sample=UpperCAmelCase__ )
| 84 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
_a : str = logging.get_logger(__name__)
# Names of the serialized tokenizer files this tokenizer reads/writes.
# NOTE(review): the mangler renamed every module constant to `_a`, so the
# class below references VOCAB_FILES_NAMES etc., which no longer exist.
_a : Optional[int] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
# Hub URLs of the pretrained RetriBERT vocabulary / tokenizer files.
_a : Optional[Any] = {
    'vocab_file': {
        'yjernite/retribert-base-uncased': (
            'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
        ),
    },
    'tokenizer_file': {
        'yjernite/retribert-base-uncased': (
            'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
        ),
    },
}
# Maximum sequence length supported by each checkpoint.
_a : Union[str, Any] = {
    'yjernite/retribert-base-uncased': 512,
}
# Per-checkpoint tokenizer init overrides.
_a : Tuple = {
    'yjernite/retribert-base-uncased': {'do_lower_case': True},
}
class a_ ( a ):
    """Fast (tokenizers-backed) RetriBERT tokenizer, a BERT-style WordPiece tokenizer.

    NOTE(review): heavily name-mangled. The class attributes reference
    VOCAB_FILES_NAMES / PRETRAINED_* constants that the mangler renamed to
    `_a` above; two methods repeat the parameter name ``UpperCAmelCase__``
    (duplicate argument names -- a SyntaxError) and their bodies read
    ``do_lower_case`` / ``token_ids_a`` etc. that the signatures never bind.
    """

    A__ : List[str] = VOCAB_FILES_NAMES
    A__ : Any = PRETRAINED_VOCAB_FILES_MAP
    A__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    A__ : Any = PRETRAINED_INIT_CONFIGURATION
    A__ : Optional[Any] = RetriBertTokenizer
    A__ : Any = ['input_ids', 'attention_mask']
    def __init__( self : Optional[int] , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : Dict="[UNK]" , UpperCAmelCase__ : str="[SEP]" , UpperCAmelCase__ : Union[str, Any]="[PAD]" , UpperCAmelCase__ : Dict="[CLS]" , UpperCAmelCase__ : Optional[Any]="[MASK]" , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Optional[int]=None , **UpperCAmelCase__ : Dict , ):
        """Initialize the fast tokenizer and re-sync the backend normalizer
        with the requested lowercase / accent / Chinese-character options."""
        super().__init__(
            UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , do_lower_case=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , tokenize_chinese_chars=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ , **UpperCAmelCase__ , )
        # If the serialized normalizer disagrees with the requested options,
        # rebuild it with the requested values.
        snake_case : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , UpperCAmelCase__ ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , UpperCAmelCase__ ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , UpperCAmelCase__ ) != tokenize_chinese_chars
        ):
            snake_case : int = getattr(UpperCAmelCase__ , normalizer_state.pop('''type''' ) )
            snake_case : List[Any] = do_lower_case
            snake_case : Union[str, Any] = strip_accents
            snake_case : int = tokenize_chinese_chars
            snake_case : int = normalizer_class(**UpperCAmelCase__ )
        snake_case : Union[str, Any] = do_lower_case
    def lowerCAmelCase( self : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any]=None ):
        """Build model input as [CLS] A [SEP] (+ B [SEP] for sequence pairs)."""
        snake_case : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output
    def lowerCAmelCase( self : Optional[Any] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ):
        """Create token-type ids: 0 for the first segment, 1 for the second."""
        snake_case : List[Any] = [self.sep_token_id]
        snake_case : Tuple = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
    def lowerCAmelCase( self : str , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ):
        """Save the WordPiece vocabulary to disk and return the written paths."""
        snake_case : Tuple = self._tokenizer.model.save(UpperCAmelCase__ , name=UpperCAmelCase__ )
        return tuple(UpperCAmelCase__ )
| 84 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a : int = logging.get_logger(__name__)
# Map from checkpoint name to the URL of its hub-hosted config file.
_a : int = {
    'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json',
}
class a_ ( a ):
    """Configuration for a data2vec-text model (a BERT/RoBERTa-style encoder).

    Stores the encoder hyper-parameters (vocabulary size, layer sizes,
    dropout probabilities, ...) and forwards the special-token ids to the
    base ``PretrainedConfig``.

    NOTE(review): the mangled original declared every ``__init__`` parameter
    as ``UpperCAmelCase__`` -- duplicate argument names, a SyntaxError --
    while the body read the real names below; the signature is restored from
    those reads and the positional default values. ``model_type`` was mangled
    to ``A__``; restored to the attribute name ``PretrainedConfig`` expects.
    """

    model_type = 'data2vec-text'

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-1_2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        # Special-token ids are handled by the base class.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class a_ ( a ):
    """ONNX export configuration: declares which input axes are dynamic."""

    @property
    def lowerCAmelCase( self : Tuple ):
        """Return an ordered mapping of input name -> {axis index: axis label}.

        Multiple-choice tasks carry an extra `choice` axis between the batch
        and sequence dimensions.
        """
        if self.task == "multiple-choice":
            axis_labels = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            axis_labels = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict((name, axis_labels) for name in ('''input_ids''', '''attention_mask'''))
| 84 |
import string
import numpy
def a_(a: int, b: int) -> int:
    """Return the greatest common divisor of `a` and `b` (Euclid's algorithm).

    NOTE(review): the mangled original declared both parameters as
    ``__magic_name__`` (duplicate argument names -- a SyntaxError) and
    recursed via the undefined name ``greatest_common_divisor``; this is the
    equivalent iterative form (gcd(0, b) == b).
    """
    while a:
        a, b = b % a, a
    return b
class a_ :
    """Hill cipher over the 36-symbol alphabet A-Z0-9.

    The key is an n x n integer matrix; text is processed in blocks of n
    symbols, each block multiplied by the key (mod 36) to encrypt, and by the
    modular inverse of the key to decrypt.

    NOTE(review): the mangled original clobbered every attribute to ``A__``
    and every method to ``lowerCAmelCase``; the names below are restored from
    the calls the bodies themselves make (self.modulus, self.process_text,
    self.encrypt, ...).
    """

    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters
    key_string = string.ascii_uppercase + string.digits
    # take x and return x % len(key_string); the mangled lambda read an
    # undefined name, `x % 36` is what its own comment documents
    modulus = numpy.vectorize(lambda x: x % 36)
    # round() element-wise, used to turn float matrices back into symbol indices
    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray):
        """Store the key reduced mod 36 and validate its determinant."""
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]  # block size = key order

    def replace_letters(self, letter: str) -> int:
        """Symbol -> index in the 36-character alphabet."""
        return self.key_string.index(letter)

    def replace_digits(self, num) -> str:
        """Index (possibly a numpy float) -> symbol."""
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        """Raise ValueError unless det(key) is coprime with 36 (else the key
        has no modular inverse and decryption is impossible)."""
        import math

        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if math.gcd(det, len(self.key_string)) != 1:
            raise ValueError(
                F"determinant modular {req_l} of encryption key({det}) "
                F"is not co prime w.r.t {req_l}.\nTry another key."
            )

    def process_text(self, text: str) -> str:
        """Upper-case, drop symbols outside the alphabet, and pad with the
        last character to a multiple of the block size."""
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)

    def encrypt(self, text: str) -> str:
        """Encrypt block-by-block: cipher_block = key @ block (mod 36)."""
        text = self.process_text(text.upper())
        encrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted += "".join(self.replace_digits(num) for num in batch_encrypted)
        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        """Return the modular inverse of the key: det_inv * det * inv(key) (mod 36)."""
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        # brute-force the modular inverse of the determinant mod 36
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        decrypt_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )
        return self.to_int(self.modulus(decrypt_key))

    def decrypt(self, text: str) -> str:
        """Decrypt block-by-block with the inverse key."""
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted += "".join(self.replace_digits(num) for num in batch_decrypted)
        return decrypted
def a_ ( ) -> None:
    """Interactive driver: read an n x n key matrix, then encrypt or decrypt text.

    NOTE(review): the mangled original assigned every local to ``snake_case``
    while the code read ``hill_matrix``, ``hc`` and ``option`` (NameErrors);
    the locals are restored to the names actually read. ``HillCipher`` is kept
    as the original wrote it, but that class was renamed by the mangler
    elsewhere in this file -- re-point it when the file is de-mangled.
    """
    n = int(input('''Enter the order of the encryption key: ''' ))
    hill_matrix = []
    print('''Enter each row of the encryption key with space separated integers''' )
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)
    hc = HillCipher(numpy.array(hill_matrix))
    print('''Would you like to encrypt or decrypt some text? (1 or 2)''' )
    option = input('''\n1. Encrypt\n2. Decrypt\n''' )
    if option == "1":
        text_to_encrypt = input('''What text would you like to encrypt?: ''' )
        print('''Your encrypted text is:''' )
        print(hc.encrypt(text_to_encrypt))
    elif option == "2":
        text_to_decrypt = input('''What text would you like to decrypt?: ''' )
        print('''Your decrypted text is:''' )
        print(hc.decrypt(text_to_decrypt))
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): `main` is not defined in this module -- the interactive
    # driver above was renamed to `a_` by the mangling pass, so this call
    # raises NameError as written.
    main()
| 84 | 1 |
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
# Module-level logger for the Ray-based distributed RAG retriever.
_a : int = logging.getLogger(__name__)
class a_ :
    """Ray-actor helper that lazily builds a RagRetriever and serves retrievals.

    NOTE(review): name-mangled. The three worker methods all share the name
    ``lowerCAmelCase`` (later defs clobber earlier ones), the first repeats
    the parameter name ``UpperCAmelCase__`` (duplicate argument names -- a
    SyntaxError), and the bodies read ``self.initialized`` / ``self.retriever``
    that the mangled assignments never bind.
    """
    def __init__( self : List[str] ):
        """Start uninitialized; the retriever is created on first use."""
        snake_case : Union[str, Any] = False
    def lowerCAmelCase( self : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any ):
        """Create the RagRetriever inside this actor (idempotent)."""
        if not self.initialized:
            snake_case : Tuple = RagRetriever(
                UpperCAmelCase__ , question_encoder_tokenizer=UpperCAmelCase__ , generator_tokenizer=UpperCAmelCase__ , index=UpperCAmelCase__ , init_retrieval=UpperCAmelCase__ , )
            snake_case : str = True
    def lowerCAmelCase( self : Dict ):
        """Load/initialize the retrieval index inside this actor."""
        self.retriever.index.init_index()
    def lowerCAmelCase( self : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int ):
        """Run retrieval and return (doc ids, retrieved doc embeddings)."""
        snake_case , snake_case : str = self.retriever._main_retrieve(UpperCAmelCase__ , UpperCAmelCase__ )
        return doc_ids, retrieved_doc_embeds
class a_ ( a ):
    """RagRetriever that fans retrieval out to a pool of Ray actor workers.

    NOTE(review): name-mangled like the rest of this file -- several defs
    repeat ``UpperCAmelCase__`` (duplicate argument names, a SyntaxError),
    results are assigned to ``snake_case`` yet read back under their real
    names (``retrieval_workers``, ``random_worker``, ``doc_ids``, ...), and
    all public methods share the name ``lowerCAmelCase``.
    """
    def __init__( self : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[Any]=None ):
        """Store the worker handles and push retriever construction to each actor."""
        # A pre-initialized local index cannot be shipped to remote workers;
        # callers must pass paths instead.
        if index is not None and index.is_initialized() and len(UpperCAmelCase__ ) > 0:
            raise ValueError(
                '''When using Ray for distributed fine-tuning, '''
                '''you\'ll need to provide the paths instead, '''
                '''as the dataset and the index are loaded '''
                '''separately. More info in examples/rag/use_own_knowledge_dataset.py ''' )
        super().__init__(
            UpperCAmelCase__ , question_encoder_tokenizer=UpperCAmelCase__ , generator_tokenizer=UpperCAmelCase__ , index=UpperCAmelCase__ , init_retrieval=UpperCAmelCase__ , )
        snake_case : Union[str, Any] = retrieval_workers
        if len(self.retrieval_workers ) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
                    for worker in self.retrieval_workers
                ] )
    def lowerCAmelCase( self : List[Any] ):
        """Initialize the index on every worker, or locally when there are none."""
        logger.info('''initializing retrieval''' )
        if len(self.retrieval_workers ) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()
    def lowerCAmelCase( self : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] ):
        """Retrieve docs via a randomly chosen worker (load balancing), or locally."""
        if len(self.retrieval_workers ) > 0:
            # Select a random retrieval actor.
            snake_case : Tuple = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
            snake_case , snake_case : Dict = ray.get(random_worker.retrieve.remote(UpperCAmelCase__ , UpperCAmelCase__ ) )
        else:
            snake_case , snake_case : List[Any] = self._main_retrieve(UpperCAmelCase__ , UpperCAmelCase__ )
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(UpperCAmelCase__ )
    @classmethod
    def lowerCAmelCase( cls : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any]=None , **UpperCAmelCase__ : str ):
        """Delegate tokenizer loading to the base RagRetriever implementation."""
        return super(UpperCAmelCase__ , cls ).get_tokenizers(UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ )
    @classmethod
    def lowerCAmelCase( cls : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Any=None , **UpperCAmelCase__ : Optional[Any] ):
        """Build a retriever from a pretrained checkpoint, optionally over a
        custom indexed dataset."""
        snake_case : Union[str, Any] = kwargs.pop('''config''' , UpperCAmelCase__ ) or RagConfig.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
        snake_case : Any = RagTokenizer.from_pretrained(UpperCAmelCase__ , config=UpperCAmelCase__ )
        snake_case : Tuple = rag_tokenizer.question_encoder
        snake_case : int = rag_tokenizer.generator
        if indexed_dataset is not None:
            snake_case : Tuple = '''custom'''
            snake_case : Union[str, Any] = CustomHFIndex(config.retrieval_vector_size , UpperCAmelCase__ )
        else:
            snake_case : List[Any] = cls._build_index(UpperCAmelCase__ )
        return cls(
            UpperCAmelCase__ , question_encoder_tokenizer=UpperCAmelCase__ , generator_tokenizer=UpperCAmelCase__ , retrieval_workers=UpperCAmelCase__ , index=UpperCAmelCase__ , )
| 84 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class a_ ( a ):
    """Pipeline tool that generates an English caption for an image using a
    BLIP vision-to-sequence checkpoint.

    NOTE(review): the mangled original declared all six class attributes as
    ``A__`` and all three pipeline hooks as ``lowerCAmelCase`` (each clobbering
    the previous), and its ``__init__`` used ``*UpperCAmelCase__,
    **UpperCAmelCase__`` -- duplicate argument names, a SyntaxError. Names are
    restored to the ``PipelineTool`` attribute/hook names -- confirm against
    the project's base class.
    """

    default_checkpoint = 'Salesforce/blip-image-captioning-base'
    description = (
        'This is a tool that generates a description of an image. It takes an input named `image` which should be the '
        'image to caption, and returns a text that contains the description in English.'
    )
    name = 'image_captioner'
    model_class = AutoModelForVisionaSeq
    inputs = ['image']
    outputs = ['text']

    def __init__(self, *args, **kwargs):
        """Require the vision backend (PIL) before the base class initializes."""
        requires_backends(self, ['''vision'''])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        """Preprocess the PIL image into model-ready tensors."""
        return self.pre_processor(images=image, return_tensors='''pt''')

    def forward(self, inputs):
        """Run caption generation on the encoded inputs."""
        return self.model.generate(**inputs)

    def decode(self, outputs):
        """Decode generated token ids into a clean caption string."""
        # skip_special_tokens=True strips BOS/EOS markers from the caption;
        # the mangled original passed an unbound name here -- confirm.
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
| 84 | 1 |
class a_ :
    """A named value used as a min-heap element (compares by `val`).

    NOTE(review): the mangled original declared both ``__init__`` parameters
    as ``UpperCAmelCase__`` (duplicate argument names -- a SyntaxError) while
    the bodies read ``name``, ``val`` and ``other``; those names are restored.
    """

    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return F"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        # Heap ordering is by value only; names are just labels.
        return self.val < other.val
class a_ :
    """Array-backed min-heap of node objects (each needs .name, .val, __lt__).

    `idx_of_element` maps each node to its current array index so
    `decrease_key` can locate a node in O(1); `heap_dict` mirrors
    name -> value for `__getitem__` lookups.

    NOTE(review): in the mangled original every method was named
    ``lowerCAmelCase`` and two defs repeated ``UpperCAmelCase__`` (duplicate
    argument names -- a SyntaxError); names are restored from the calls the
    bodies themselves make (self.build_heap, self.sift_down, ...).
    """

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        # lookup is by node *name*
        return self.heap_dict[key]

    def build_heap(self, array):
        """Register every element, then heapify in place bottom-up."""
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, node in enumerate(array):
            self.idx_of_element[node] = idx
            self.heap_dict[node.name] = node.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        """Move array[idx] down until both children are >= it (min-heapify)."""
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)
            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r
            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                # keep the index map in sync with the swap
                self.idx_of_element[array[idx]], self.idx_of_element[array[smallest]] = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        """Move heap[idx] up while it is smaller than its parent."""
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        """Return (without removing) the minimum element."""
        return self.heap[0]

    def remove(self):
        """Pop and return the minimum element."""
        # swap root with the last element, pop it, then restore the heap
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        """Append a node and sift it up to its place."""
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        """Lower a node's value and restore the heap property."""
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
# NOTE(review): this demo is name-mangled -- every result is assigned to `_a`
# while the code below reads `r`, `b`, `a`, `x`, `e` and `my_min_heap`, and
# `Node` / `MinHeap` were renamed to `a_` above; every line below raises
# NameError as written.
_a : List[str] = Node('R', -1)
_a : Union[str, Any] = Node('B', 6)
_a : Union[str, Any] = Node('A', 3)
_a : Optional[int] = Node('X', 1)
_a : List[Any] = Node('E', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
_a : Tuple = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('Min Heap - before decrease key')
for i in my_min_heap.heap:
    print(i)
print('Min Heap - After decrease key of node [B -> -17]')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
    print(i)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 84 |
def a_(p: int) -> bool:
    """Lucas-Lehmer primality test: return True iff 2**p - 1 is prime.

    p == 2 is the special case (M_2 = 3 is prime); for p > 2 the sequence
    s -> s**2 - 2 (mod M_p), started at 4 and iterated p - 2 times, ends at 0
    exactly when M_p is prime. Raises ValueError if p < 2.

    NOTE(review): the mangled original named the parameter ``__magic_name__``
    while the body read the undefined name ``p``; restored.
    """
    if p < 2:
        raise ValueError('''p should not be less than 2!''' )
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1  # the Mersenne number M_p
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
    # Demo: 2**7 - 1 = 127 is a Mersenne prime; 2**11 - 1 = 2047 = 23 * 89 is not.
    # NOTE(review): the original called `lucas_lehmer_test`, which does not
    # exist in this file -- the test function above was renamed to `a_`.
    print(a_(7))
    print(a_(11))
| 84 | 1 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class a_ ( unittest.TestCase ):
    """Multi-GPU launcher tests: each test spawns `torchrun` on one of the
    helper scripts shipped next to `accelerate.test_utils`.

    NOTE(review): the mangled original named all five methods
    ``lowerCAmelCase`` (so unittest would discover none and later defs
    clobbered earlier ones) and dropped the script paths into throwaway
    locals while the tests read ``self.test_file_path`` etc.; names are
    restored from those reads -- confirm test-method names against the
    upstream accelerate test suite.
    """

    def setUp(self):
        # The helper scripts live in the `scripts/` directory next to
        # accelerate.test_utils.
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['''scripts''', '''test_script.py'''])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ['''scripts''', '''test_distributed_data_loop.py'''])
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['''scripts''', '''test_ops.py'''])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(F"Found {torch.cuda.device_count()} devices." )
        cmd = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(F"Found {torch.cuda.device_count()} devices." )
        cmd = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(F"Command: {cmd}" )
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        # Re-launches THIS file under torchrun; the __main__ guard below runs
        # the actual distributed assertions.
        cmd = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(F"Found {torch.cuda.device_count()} devices, using 2 devices only" )
        cmd = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices='''0,1'''):
            execute_subprocess_async(cmd, env=os.environ.copy())
# NOTE(review): this block is the distributed payload run under torchrun by
# test_pad_across_processes. It is name-mangled: results are assigned to `_a`
# while the code reads `accelerator`, `shape`, `tensor`, `error_msg`, `tensora`
# and `index`, so every line below raises NameError as written.
if __name__ == "__main__":
    _a : Dict = Accelerator()
    _a : Optional[Any] = (accelerator.state.process_index + 2, 10)
    _a : Dict = torch.randint(0, 10, shape).to(accelerator.device)
    _a : Optional[Any] = ''
    # Pad at the end (default): the original tensor must be preserved at the
    # front and the padding must be zeros.
    _a : Tuple = accelerator.pad_across_processes(tensor)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."
    # pad_first=True: zeros go in front, original values at the back.
    _a : str = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    _a : List[Any] = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensora[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."
    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 84 |
from sklearn.metrics import fa_score
import datasets
_a : List[str] = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'
_a : Dict = '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. 
This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, 
references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n'
_a : List[Any] = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
    # `datasets.Metric` subclass computing the F1 score.
    # NOTE(review): `_DESCRIPTION`/`_CITATION`/`_KWARGS_DESCRIPTION` and
    # `fa_score` are not defined under those names in this chunk — presumably
    # bound earlier in the file (`fa_score` looks like sklearn's `f1_score`,
    # renamed); verify.
    def lowerCAmelCase( self : Any ):
        """Return the MetricInfo schema: int32 predictions/references, each
        wrapped in a Sequence when the metric is configured as ``multilabel``."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
                    '''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
                }
                if self.config_name == '''multilabel'''
                else {
                    '''predictions''': datasets.Value('''int32''' ),
                    '''references''': datasets.Value('''int32''' ),
                } ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
    # NOTE(review): this `def` re-uses the name above (shadowing it in the class
    # body) and repeats the parameter name `UpperCAmelCase__`, which is a
    # SyntaxError as written — pre-mangling this was presumably
    # `_compute(predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None)`.
    def lowerCAmelCase( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Optional[Any]=1 , UpperCAmelCase__ : List[str]="binary" , UpperCAmelCase__ : str=None ):
        """Compute the F1 score and return ``{"f1": ...}`` — a float when a
        single averaged value is produced, an array otherwise."""
        snake_case : List[Any] = fa_score(
            UpperCAmelCase__ , UpperCAmelCase__ , labels=UpperCAmelCase__ , pos_label=UpperCAmelCase__ , average=UpperCAmelCase__ , sample_weight=UpperCAmelCase__ )
        # NOTE(review): `score` is unbound here — presumably the result assigned
        # just above (renamed `snake_case` by tooling); verify.
        return {"f1": float(UpperCAmelCase__ ) if score.size == 1 else score}
| 84 | 1 |
def a_ ( __magic_name__ ) -> int:
    """Return the n-th Catalan number (1-indexed).

    The sequence is 1, 1, 2, 5, 14, 42, ... so ``a_(1) == 1`` and
    ``a_(5) == 14``. Uses the exact-integer recurrence
    ``C(i) = C(i-1) * (4*i - 2) // (i + 1)``.

    Raises:
        TypeError: if the input is not an integer.
        ValueError: if the input is < 1.
    """
    if not isinstance(__magic_name__, int):
        # Build the diagnostic message and actually attach it to the exception.
        # (The previous version tested `isinstance(x, x)` and raised with the
        # raw input, discarding the message it had just built.)
        msg = f"Input value of [number={__magic_name__}] must be an integer"
        raise TypeError(msg)
    if __magic_name__ < 1:
        msg = f"Input value of [number={__magic_name__}] must be > 0"
        raise ValueError(msg)
    current_number = 1
    for i in range(1, __magic_name__):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
if __name__ == "__main__":
    # Run the doctests embedded in this module's docstrings.
    import doctest
    doctest.testmod()
| 84 |
def a_ ( __magic_name__ ) -> int:
    """Return the largest number obtainable by deleting exactly one digit.

    The sign is discarded (the digits of ``abs(input)`` are used). Every
    single-digit deletion is tried and the maximum remaining value returned.
    A single-digit input leaves an empty string and raises ValueError when
    converted, matching the pre-existing behaviour.

    Raises:
        TypeError: if the input is not an integer.
    """
    if not isinstance(__magic_name__ , int ):
        raise TypeError('''only integers accepted as input''' )
    digits = str(abs(__magic_name__ ) )
    # Drop the digit at each position in turn, keeping the rest in order.
    # (The previous version referenced an undefined name and popped with the
    # input value instead of an index, so it crashed with a NameError.)
    candidates = (digits[:index] + digits[index + 1 :] for index in range(len(digits ) ))
    return max(int(candidate ) for candidate in candidates )
if __name__ == "__main__":
    # Run doctests; `__import__` keeps the import inline in the guard.
    __import__('doctest').testmod()
| 84 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_a : int = logging.get_logger(__name__)
# Mapping from fairseq wav2vec2-conformer parameter-name fragments (keys) to the
# corresponding HF Wav2Vec2Conformer attribute paths (values). The '*' wildcard
# is substituted with the encoder layer index during conversion.
# NOTE(review): the converter functions below refer to this table as `MAPPING`
# and the list as `TOP_LEVEL_KEYS`, but both are bound to `_a` here
# (machine-renamed) — verify against upstream.
_a : List[Any] = {
    'post_extract_proj': 'feature_projection.projection',
    'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
    'self_attn.linear_k': 'encoder.layers.*.self_attn.linear_k',
    'self_attn.linear_v': 'encoder.layers.*.self_attn.linear_v',
    'self_attn.linear_q': 'encoder.layers.*.self_attn.linear_q',
    'self_attn.pos_bias_u': 'encoder.layers.*.self_attn.pos_bias_u',
    'self_attn.pos_bias_v': 'encoder.layers.*.self_attn.pos_bias_v',
    'self_attn.linear_out': 'encoder.layers.*.self_attn.linear_out',
    'self_attn.linear_pos': 'encoder.layers.*.self_attn.linear_pos',
    'self_attn.rotary_emb': 'encoder.embed_positions',
    'self_attn_layer_norm': 'encoder.layers.*.self_attn_layer_norm',
    'conv_module.pointwise_conv1': 'encoder.layers.*.conv_module.pointwise_conv1',
    'conv_module.pointwise_conv2': 'encoder.layers.*.conv_module.pointwise_conv2',
    'conv_module.depthwise_conv': 'encoder.layers.*.conv_module.depthwise_conv',
    'conv_module.batch_norm': 'encoder.layers.*.conv_module.batch_norm',
    'conv_module.layer_norm': 'encoder.layers.*.conv_module.layer_norm',
    'ffn1.w_1': 'encoder.layers.*.ffn1.intermediate_dense',
    'ffn1.w_2': 'encoder.layers.*.ffn1.output_dense',
    'ffn1.layer_norm': 'encoder.layers.*.ffn1_layer_norm',
    'ffn2.w_1': 'encoder.layers.*.ffn2.intermediate_dense',
    'ffn2.w_2': 'encoder.layers.*.ffn2.output_dense',
    'ffn2.layer_norm': 'encoder.layers.*.ffn2_layer_norm',
    'final_layer_norm': 'encoder.layers.*.final_layer_norm',
    'encoder.layer_norm': 'encoder.layer_norm',
    'w2v_model.layer_norm': 'feature_projection.layer_norm',
    'quantizer.weight_proj': 'quantizer.weight_proj',
    'quantizer.vars': 'quantizer.codevectors',
    'project_q': 'project_q',
    'final_proj': 'project_hid',
    'w2v_encoder.proj': 'lm_head',
    'mask_emb': 'masked_spec_embed',
}
# Destinations that live at the model top level and therefore are NOT prefixed
# with 'wav2vec2_conformer.' when mapped.
_a : str = [
    'lm_head',
    'quantizer.weight_proj',
    'quantizer.codevectors',
    'project_q',
    'project_hid',
]
def a_ ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> int:
    """Copy one checkpoint tensor into the HF model attribute addressed by a
    dotted key, after checking that the destination shape matches.

    NOTE(review): machine-mangled — the parameter list repeats
    ``__magic_name__`` (a SyntaxError as written) and the body reads
    ``key``/``weight_type``/``hf_pointer``/``value``/``full_name`` which are
    never bound here; the original signature was presumably
    ``(hf_pointer, key, value, full_name, weight_type)``. Verify upstream.
    """
    # Walk the dotted attribute path down from the model root.
    for attribute in key.split('''.''' ):
        snake_case : Any = getattr(__magic_name__ , __magic_name__ )
    # Resolve the destination shape: a sub-attribute (e.g. .weight/.bias) when
    # a weight_type is given, the resolved pointer itself otherwise.
    if weight_type is not None:
        snake_case : Optional[Any] = getattr(__magic_name__ , __magic_name__ ).shape
    else:
        snake_case : Dict = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            F" {value.shape} for {full_name}" )
    # Dispatch on the kind of parameter being copied; every branch assigns the
    # incoming tensor (pre-mangling each branch targeted a different
    # destination such as ``hf_pointer.weight.data`` vs ``hf_pointer.bias.data``).
    if weight_type == "weight":
        snake_case : Tuple = value
    elif weight_type == "weight_g":
        snake_case : Tuple = value
    elif weight_type == "weight_v":
        snake_case : List[str] = value
    elif weight_type == "bias":
        snake_case : Union[str, Any] = value
    elif weight_type == "running_mean":
        snake_case : Any = value
    elif weight_type == "running_var":
        snake_case : Optional[int] = value
    elif weight_type == "num_batches_tracked":
        snake_case : List[Any] = value
    elif weight_type == "inv_freq":
        snake_case : str = value
    else:
        snake_case : Dict = value
    logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def a_ ( __magic_name__ , __magic_name__ , __magic_name__ ) -> int:
    """Transfer every tensor of a fairseq state dict into the HF model: conv
    feature-extractor layers go through ``load_conv_layer``, everything else is
    matched against the MAPPING table and copied via ``set_recursively``;
    unmatched tensors are collected and logged as unused.

    NOTE(review): machine-mangled — the parameter list repeats
    ``__magic_name__`` (a SyntaxError as written) and the body reads
    ``fairseq_model``/``hf_model``/``is_used``/``unused_weights`` which are
    never bound here; presumably ``(fairseq_model, hf_model, is_headless)``.
    Verify upstream.
    """
    snake_case : Optional[int] = []
    snake_case : List[str] = fairseq_model.state_dict()
    snake_case : List[Any] = hf_model.wavaveca_conformer.feature_extractor
    for name, value in fairseq_dict.items():
        snake_case : Dict = False
        if "conv_layers" in name:
            load_conv_layer(
                __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , hf_model.config.feat_extract_norm == '''group''' , )
            snake_case : Tuple = True
        else:
            for key, mapped_key in MAPPING.items():
                # Prefix with the submodule name unless the target is top-level.
                snake_case : Optional[int] = '''wav2vec2_conformer.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
                    snake_case : List[str] = True
                    # Substitute the '*' wildcard with the encoder layer index.
                    if "*" in mapped_key:
                        snake_case : List[str] = name.split(__magic_name__ )[0].split('''.''' )[-2]
                        snake_case : List[Any] = mapped_key.replace('''*''' , __magic_name__ )
                    # Classify the tensor so set_recursively knows which
                    # sub-attribute (weight/bias/BN stats/...) to write.
                    if "pos_bias_u" in name:
                        snake_case : int = None
                    elif "pos_bias_v" in name:
                        snake_case : Optional[int] = None
                    elif "weight_g" in name:
                        snake_case : List[str] = '''weight_g'''
                    elif "weight_v" in name:
                        snake_case : Any = '''weight_v'''
                    elif "bias" in name:
                        snake_case : Any = '''bias'''
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        snake_case : Optional[Any] = '''weight'''
                    elif "running_mean" in name:
                        snake_case : List[Any] = '''running_mean'''
                    elif "inv_freq" in name:
                        snake_case : Optional[int] = '''inv_freq'''
                    elif "running_var" in name:
                        snake_case : Optional[int] = '''running_var'''
                    elif "num_batches_tracked" in name:
                        snake_case : Optional[Any] = '''num_batches_tracked'''
                    else:
                        snake_case : Dict = None
                    set_recursively(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
                continue
        if not is_used:
            unused_weights.append(__magic_name__ )
    logger.warning(F"Unused weights: {unused_weights}" )
def a_ ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Any:
    """Copy one conv-feature-extractor tensor into the HF model. ``type_id == 0``
    targets the conv weight/bias; ``type_id == 2`` targets the layer norm
    (subject to the group-norm flag); anything else is recorded as unused.

    NOTE(review): machine-mangled — the parameter list repeats
    ``__magic_name__`` (a SyntaxError as written) and the body reads
    ``full_name``/``name``/``value``/``feature_extractor``/``use_group_norm``/
    ``unused_weights`` which are never bound here; presumably
    ``(full_name, value, feature_extractor, unused_weights, use_group_norm)``.
    Verify upstream.
    """
    snake_case : Tuple = full_name.split('''conv_layers.''' )[-1]
    snake_case : List[Any] = name.split('''.''' )
    # The fairseq name encodes "<layer_id>.<type_id>.<param>".
    snake_case : int = int(items[0] )
    snake_case : Dict = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
            snake_case : Dict = value
            logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
            snake_case : int = value
            logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
            snake_case : Optional[int] = value
            logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
            snake_case : Tuple = value
            logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
    else:
        unused_weights.append(__magic_name__ )
@torch.no_grad()
def a_ ( __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__=True ) -> List[Any]:
    """Convert a fairseq wav2vec2-conformer checkpoint to the HF format:
    build/restore the config, optionally construct tokenizer + feature
    extractor for a fine-tuned (CTC) model, load the fairseq model, copy the
    weights, and save the result.

    NOTE(review): machine-mangled — the parameter list repeats
    ``__magic_name__`` (a SyntaxError as written) and the body reads
    ``config_path``/``checkpoint_path``/``dict_path``/``is_finetuned``/
    ``config``/``target_dict``/``processor``/``model``/``hf_wavavec`` which
    are never bound here; presumably
    ``(checkpoint_path, pytorch_dump_folder_path, config_path, dict_path, is_finetuned)``.
    Verify upstream.
    """
    if config_path is not None:
        snake_case : Union[str, Any] = WavaVecaConformerConfig.from_pretrained(__magic_name__ , hidden_act='''swish''' )
    else:
        snake_case : List[Any] = WavaVecaConformerConfig()
    if "rope" in checkpoint_path:
        # Rotary-position-embedding checkpoints are detected from the path name.
        snake_case : Any = '''rotary'''
    if is_finetuned:
        if dict_path:
            snake_case : Dict = Dictionary.load(__magic_name__ )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            snake_case : List[Any] = target_dict.pad_index
            snake_case : Dict = target_dict.bos_index
            snake_case : str = target_dict.eos_index
            snake_case : List[Any] = len(target_dict.symbols )
            snake_case : Optional[int] = os.path.join(__magic_name__ , '''vocab.json''' )
            if not os.path.isdir(__magic_name__ ):
                logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__magic_name__ ) )
                return
            os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
            snake_case : List[Any] = target_dict.indices
            # fairseq has the <pad> and <s> switched
            snake_case : Union[str, Any] = 0
            snake_case : List[str] = 1
            with open(__magic_name__ , '''w''' , encoding='''utf-8''' ) as vocab_handle:
                json.dump(__magic_name__ , __magic_name__ )
            snake_case : str = WavaVecaCTCTokenizer(
                __magic_name__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__magic_name__ , )
            snake_case : Dict = True if config.feat_extract_norm == '''layer''' else False
            snake_case : Tuple = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=__magic_name__ , return_attention_mask=__magic_name__ , )
            snake_case : str = WavaVecaProcessor(feature_extractor=__magic_name__ , tokenizer=__magic_name__ )
            processor.save_pretrained(__magic_name__ )
        snake_case : Optional[int] = WavaVecaConformerForCTC(__magic_name__ )
    else:
        snake_case : Optional[Any] = WavaVecaConformerForPreTraining(__magic_name__ )
    if is_finetuned:
        snake_case , snake_case , snake_case : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
    else:
        snake_case : Optional[Any] = argparse.Namespace(task='''audio_pretraining''' )
        snake_case : Dict = fairseq.tasks.setup_task(__magic_name__ )
        snake_case , snake_case , snake_case : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__magic_name__ )
    snake_case : Dict = model[0].eval()
    recursively_load_weights(__magic_name__ , __magic_name__ , not is_finetuned )
    hf_wavavec.save_pretrained(__magic_name__ )
if __name__ == "__main__":
    # CLI entry point: parse checkpoint/config/dict paths and run the converter.
    # NOTE(review): `parser`, `args` and `convert_wavaveca_conformer_checkpoint`
    # are not bound under those names in this chunk (the parser/args are
    # assigned to `_a` and the converter is machine-renamed `a_`) — verify.
    _a : str = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    parser.add_argument(
        '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
    )
    _a : int = parser.parse_args()
    convert_wavaveca_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 84 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class a_ :
    """Helper that builds tiny UMT5 configs and model inputs for the tests below.

    NOTE(review): machine-mangled — ``__init__`` repeats the parameter name
    ``UpperCAmelCase__`` (a SyntaxError as written) and method bodies read
    names (``parent``, ``batch_size``, ``config``, ``input_ids``, ...) that
    are never bound, while results are assigned to ``snake_case``. Originally
    this was presumably a conventional ``UMTaModelTester``. Verify upstream
    before relying on this documentation.
    """
    def __init__( self : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any]=99 , UpperCAmelCase__ : Dict=13 , UpperCAmelCase__ : int=7 , UpperCAmelCase__ : Any=9 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Tuple=False , UpperCAmelCase__ : Tuple=32 , UpperCAmelCase__ : Dict=5 , UpperCAmelCase__ : Optional[int]=4 , UpperCAmelCase__ : List[Any]=37 , UpperCAmelCase__ : Union[str, Any]=8 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : str=0.002 , UpperCAmelCase__ : str=1 , UpperCAmelCase__ : Any=0 , UpperCAmelCase__ : Union[str, Any]=0 , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Optional[Any]=None , ):
        """Record the tester's hyper-parameters (sizes, dropout, token ids)."""
        snake_case : Union[str, Any] = parent
        snake_case : Union[str, Any] = batch_size
        snake_case : Any = encoder_seq_length
        snake_case : str = decoder_seq_length
        # For common tests
        snake_case : Optional[int] = self.decoder_seq_length
        snake_case : Optional[Any] = is_training
        snake_case : List[Any] = use_attention_mask
        snake_case : Union[str, Any] = use_labels
        snake_case : Any = vocab_size
        snake_case : Optional[int] = hidden_size
        snake_case : List[str] = num_hidden_layers
        snake_case : Union[str, Any] = num_attention_heads
        snake_case : Any = d_ff
        snake_case : Any = relative_attention_num_buckets
        snake_case : Optional[Any] = dropout_rate
        snake_case : int = initializer_factor
        snake_case : Optional[Any] = eos_token_id
        snake_case : Dict = pad_token_id
        snake_case : Optional[Any] = decoder_start_token_id
        snake_case : Union[str, Any] = None
        snake_case : List[str] = decoder_layers
    def lowerCAmelCase( self : Union[str, Any] ):
        """Fetch the reference full-size config from the hub checkpoint."""
        return TaConfig.from_pretrained('''google/umt5-base''' )
    def lowerCAmelCase( self : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : List[Any]=None , ):
        """Fill in default attention/head masks for any that were not provided
        and bundle everything into the model's keyword-argument dict."""
        if attention_mask is None:
            snake_case : Union[str, Any] = input_ids.ne(config.pad_token_id )
        if decoder_attention_mask is None:
            snake_case : Any = decoder_input_ids.ne(config.pad_token_id )
        if head_mask is None:
            snake_case : List[Any] = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=UpperCAmelCase__ )
        if decoder_head_mask is None:
            snake_case : Tuple = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=UpperCAmelCase__ )
        if cross_attn_head_mask is None:
            snake_case : Union[str, Any] = torch.ones(
                config.num_decoder_layers , config.num_attention_heads , device=UpperCAmelCase__ )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }
    def lowerCAmelCase( self : int ):
        """Build random encoder/decoder ids (clamped so no pad tokens appear)
        and return ``(config, inputs_dict)`` for a forward pass."""
        snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
        snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        snake_case : List[str] = input_ids.clamp(self.pad_token_id + 1 )
        snake_case : List[str] = decoder_input_ids.clamp(self.pad_token_id + 1 )
        snake_case : str = self.get_config()
        snake_case : Tuple = config.num_attention_heads
        snake_case : List[Any] = self.prepare_inputs_dict(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
        return config, input_dict
    def lowerCAmelCase( self : Dict ):
        """Alias of prepare_config_and_inputs used by the common test mixin."""
        snake_case , snake_case : List[str] = self.prepare_config_and_inputs()
        return config, inputs_dict
    def lowerCAmelCase( self : Dict ):
        """Return a small TaConfig with a fixed (166) vocab size."""
        return TaConfig(
            vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def lowerCAmelCase( self : Tuple ):
        """Return a small TaConfig using the tester's own vocab size."""
        return TaConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def lowerCAmelCase( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Tuple , ):
        """Run UMTaModel forward (with and without masks) and check the shapes
        of encoder/decoder outputs and the past key-value cache layout."""
        snake_case : str = UMTaModel(config=UpperCAmelCase__ )
        model.to(UpperCAmelCase__ )
        model.eval()
        snake_case : str = model(
            input_ids=UpperCAmelCase__ , decoder_input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , )
        snake_case : int = model(input_ids=UpperCAmelCase__ , decoder_input_ids=UpperCAmelCase__ )
        snake_case : int = result.last_hidden_state
        snake_case : Dict = result.past_key_values
        snake_case : Dict = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
        self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(UpperCAmelCase__ ) , config.num_layers )
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0] ) , 4 )
    def lowerCAmelCase( self : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str] , ):
        """Check that decoding with a key-value cache matches the uncached
        forward pass on the final position."""
        snake_case : int = UMTaModel(config=UpperCAmelCase__ ).get_decoder().to(UpperCAmelCase__ ).eval()
        # first forward pass
        snake_case : List[Any] = model(UpperCAmelCase__ , use_cache=UpperCAmelCase__ )
        snake_case : List[Any] = model(UpperCAmelCase__ )
        snake_case : Any = model(UpperCAmelCase__ , use_cache=UpperCAmelCase__ )
        self.parent.assertTrue(len(UpperCAmelCase__ ) == len(UpperCAmelCase__ ) )
        self.parent.assertTrue(len(UpperCAmelCase__ ) == len(UpperCAmelCase__ ) + 1 )
        snake_case , snake_case : List[str] = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        snake_case : Any = ids_tensor((self.batch_size, 1) , config.vocab_size )
        # append to next input_ids and
        snake_case : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
        snake_case : Any = model(UpperCAmelCase__ )['''last_hidden_state''']
        snake_case : Optional[Any] = model(UpperCAmelCase__ , past_key_values=UpperCAmelCase__ )['''last_hidden_state''']
        # select random slice
        snake_case : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        snake_case : Union[str, Any] = output_from_no_past[:, -1, random_slice_idx].detach()
        snake_case : Tuple = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-3 ) )
    def lowerCAmelCase( self : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , ):
        """Run a half-precision forward pass and assert no NaNs are produced."""
        snake_case : int = UMTaModel(config=UpperCAmelCase__ ).to(UpperCAmelCase__ ).half().eval()
        snake_case : str = model(**UpperCAmelCase__ )['''last_hidden_state''']
        self.parent.assertFalse(torch.isnan(UpperCAmelCase__ ).any().item() )
@require_torch
class a_ ( a , a , a , unittest.TestCase ):
    """Test-suite entry point for the UMT5 models.

    NOTE(review): machine-mangled — the three base mixins are all renamed
    ``a`` (presumably ModelTesterMixin / GenerationTesterMixin /
    PipelineTesterMixin given the imports at the top of this file) and the
    class-level flags are all renamed ``A__``, so later assignments shadow
    earlier ones as written. Verify upstream.
    """
    A__ : str = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    A__ : str = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    A__ : Any = (
        {
            'conversational': UMTaForConditionalGeneration,
            'feature-extraction': UMTaModel,
            'summarization': UMTaForConditionalGeneration,
            'text2text-generation': UMTaForConditionalGeneration,
            'translation': UMTaForConditionalGeneration,
            'question-answering': UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    A__ : Dict = True
    A__ : List[str] = False
    A__ : Optional[int] = False
    A__ : Optional[int] = True
    A__ : List[str] = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    A__ : int = [0.8, 0.9]
    def lowerCAmelCase( self : Optional[Any] ):
        """Instantiate the model-tester helper.

        NOTE(review): ``UMTaModelTester`` is not defined under that name in
        this chunk (the tester class above is machine-renamed ``a_``) — verify.
        """
        snake_case : Union[str, Any] = UMTaModelTester(self )
    @unittest.skip('''Test has a segmentation fault on torch 1.8.0''' )
    def lowerCAmelCase( self : Optional[int] ):
        """Export a UMTaModel to ONNX into a temporary directory (skipped)."""
        snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
        snake_case : Optional[Any] = UMTaModel(config_and_inputs[0] ).to(UpperCAmelCase__ )
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                UpperCAmelCase__ , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"{tmpdirname}/t5_test.onnx" , export_params=UpperCAmelCase__ , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , )
    @unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
    def lowerCAmelCase( self : List[Any] ):
        """Exercise the fp16 forward-pass check from the model tester."""
        snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fpaa_forward(*UpperCAmelCase__ )
    def lowerCAmelCase( self : Tuple ):
        """Generate with each head mask fully zeroed and assert the
        corresponding attention weights are all zero."""
        snake_case : Optional[int] = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions''']
        snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
        snake_case : int = config_and_inputs[0]
        snake_case : Union[str, Any] = UMTaForConditionalGeneration(UpperCAmelCase__ ).eval()
        model.to(UpperCAmelCase__ )
        snake_case : str = {
            '''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=UpperCAmelCase__ ),
            '''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCAmelCase__ ),
            '''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCAmelCase__ ),
        }
        for attn_name, (name, mask) in zip(UpperCAmelCase__ , head_masking.items() ):
            snake_case : int = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                snake_case : List[str] = torch.ones(
                    config.num_decoder_layers , config.num_heads , device=UpperCAmelCase__ )
            snake_case : Union[str, Any] = model.generate(
                config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=UpperCAmelCase__ , return_dict_in_generate=UpperCAmelCase__ , **UpperCAmelCase__ , )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            snake_case : List[str] = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
    @unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' )
    def lowerCAmelCase( self : Any ):
        """Placeholder for a disabled test."""
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class a_ ( unittest.TestCase ):
    """Slow integration test comparing UMT5-small tokenization and generation
    against hard-coded golden values (currently skipped, see decorator)."""
    @slow
    @unittest.skip(
        '''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' )
    def lowerCAmelCase( self : int ):
        """Tokenize sentinel-containing prompts, compare the ids against a
        golden tensor, then generate and compare the decoded strings."""
        snake_case : Optional[Any] = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=UpperCAmelCase__ ).to(UpperCAmelCase__ )
        snake_case : int = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=UpperCAmelCase__ , legacy=UpperCAmelCase__ )
        snake_case : List[str] = [
            '''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
            '''No se como puedo <extra_id_0>.''',
            '''This is the reason why we <extra_id_0> them.''',
            '''The <extra_id_0> walks in <extra_id_1>, seats''',
            '''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
        ]
        snake_case : Dict = tokenizer(UpperCAmelCase__ , return_tensors='''pt''' , padding=UpperCAmelCase__ ).input_ids
        # fmt: off
        snake_case : Optional[Any] = torch.tensor(
            [
                [ 38_530, 210_703, 256_299, 1_410, 256_298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 826, 321, 671, 25_922, 256_299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 1_460, 339, 312, 19_014, 10_620, 758, 256_299, 2_355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
                [ 517, 256_299, 14_869, 281, 301, 256_298, 275, 119_983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 320, 256_299, 14_869, 281, 2_234, 289, 2_275, 333,61_391, 289, 256_298, 543, 256_297, 168_714, 329, 256_296,274, 1],
            ] )
        # fmt: on
        torch.testing.assert_allclose(UpperCAmelCase__ , UpperCAmelCase__ )
        snake_case : List[Any] = model.generate(input_ids.to(UpperCAmelCase__ ) )
        snake_case : int = [
            '''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
            '''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
            '''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
            '''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
            '''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
        ]
        snake_case : Tuple = tokenizer.batch_decode(UpperCAmelCase__ )
        self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
| 84 | 1 |
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class a_ :
A__ : Optional[int] = BlenderbotConfig
A__ : str = {}
A__ : List[Any] = 'gelu'
def __init__( self : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any=13 , UpperCAmelCase__ : str=7 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Any=False , UpperCAmelCase__ : str=99 , UpperCAmelCase__ : Any=32 , UpperCAmelCase__ : List[str]=2 , UpperCAmelCase__ : Dict=4 , UpperCAmelCase__ : Tuple=37 , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : List[Any]=20 , UpperCAmelCase__ : Tuple=2 , UpperCAmelCase__ : str=1 , UpperCAmelCase__ : List[str]=0 , ):
"""simple docstring"""
snake_case : Tuple = parent
snake_case : Optional[int] = batch_size
snake_case : List[str] = seq_length
snake_case : int = is_training
snake_case : List[str] = use_labels
snake_case : int = vocab_size
snake_case : str = hidden_size
snake_case : Union[str, Any] = num_hidden_layers
snake_case : Union[str, Any] = num_attention_heads
snake_case : int = intermediate_size
snake_case : str = hidden_dropout_prob
snake_case : Optional[int] = attention_probs_dropout_prob
snake_case : Union[str, Any] = max_position_embeddings
snake_case : Optional[Any] = eos_token_id
snake_case : str = pad_token_id
snake_case : Union[str, Any] = bos_token_id
def lowerCAmelCase( self : Any ):
"""simple docstring"""
snake_case : str = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
snake_case : Tuple = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
snake_case : Tuple = tf.concat([input_ids, eos_tensor] , axis=1 )
snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case : List[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
snake_case : Union[str, Any] = prepare_blenderbot_inputs_dict(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
return config, inputs_dict
def check_decoder_model_past_large_inputs(self, config, inputs_dict):
    """Check that decoding with cached ``past_key_values`` matches a full forward pass.

    Runs the decoder once with ``use_cache=True``, appends three new tokens, then
    compares a random output slice of cached vs. uncached decoding.
    """
    model = TFBlenderbotModel(config=config).get_decoder()
    input_ids = inputs_dict["input_ids"]

    # restrict to a single sequence to keep the comparison cheap
    input_ids = input_ids[:1, :]
    attention_mask = inputs_dict["attention_mask"][:1, :]
    head_mask = inputs_dict["head_mask"]
    self.batch_size = 1

    # first forward pass
    outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

    output, past_key_values = outputs.to_tuple()

    # create hypothetical next token and extent to next_input_ids
    next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
    next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

    # append to next input_ids and
    next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
    next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

    output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
    output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

    self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

    # select random slice
    random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
    output_from_no_past = output_from_no_past[:, -3:, random_slice_idx]
    output_from_past = output_from_past[:, :, random_slice_idx]

    # test that outputs are equal for slice
    tf.debugging.assert_near(output_from_no_past, output_from_past, rtol=1e-3)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
) -> dict:
    """Assemble the full model-input dict, deriving any masks that were not supplied.

    Renamed from the obfuscated ``a_`` to the name its caller uses; the duplicate
    parameter names (a SyntaxError) and never-bound mask locals are restored.
    """
    if attention_mask is None:
        # mask out padding positions in the encoder input
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # the first decoder token (decoder start token) is always attended to
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class a_(a, a, unittest.TestCase):
    """Common-model-test harness for TFBlenderbot."""

    # NOTE(review): the base list repeats the same name `a`, which raises a duplicate-base
    # TypeError at class-creation time — the two mixins' real names (from the file's imports)
    # need to be restored; they are not visible in this chunk.
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        # the tester builds configs/inputs; ConfigTester runs the generic config checks
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)  # TODO(review): confirm config class name against the imports

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class a_(unittest.TestCase):
    """Slow integration test: generation with the 400M distilled Blenderbot checkpoint."""

    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        # tokenizer matching the checkpoint under test; the test body reads `self.tokenizer`
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        # NOTE(review): `TFAutoModelForSeqaSeqLM` looks like an obfuscation of
        # TFAutoModelForSeq2SeqLM — keep the spelling used by this file's imports.
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| 84 |
import torch
from diffusers import DiffusionPipeline
class a_(DiffusionPipeline):
    """Minimal one-step pipeline used for testing: one UNet call, one scheduler step.

    The result is deterministically all-ones (see ``__call__``), which makes it easy
    to assert on in tests.
    """

    def __init__(self, unet, scheduler):
        super().__init__()
        # expose the components as self.unet / self.scheduler and track them for save/load
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        # random initial sample with the UNet's expected (batch, channels, H, W) shape
        sample = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1
        model_output = self.unet(sample, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, sample).prev_sample
        # scheduler_output - scheduler_output == 0, so the output is exactly ones_like(...)
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)
        return result
| 84 | 1 |
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """Return the normalized escape step of the Mandelbrot iteration at point (x, y).

    Iterates z -> z^2 + c with c = x + iy (real part ``a``, imaginary part ``b``).
    Returns ``step / (max_step - 1)``: 1.0 means the point never diverged within
    ``max_step`` iterations (treated as inside the set); values < 1 encode how
    quickly it escaped.
    """
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex numbers with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb(distance: float) -> tuple:
    """Black for points inside the set (distance == 1), white for everything else."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb(distance: float) -> tuple:
    """Black inside the set; otherwise map the escape distance to an HSV hue."""
    if distance == 1:
        return (0, 0, 0)
    else:
        # hue = distance, full saturation/value, converted to 0-255 RGB
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> "Image.Image":
    """Render the Mandelbrot set into a new PIL image.

    Fixes in this revision: the computed color was previously discarded (the
    ``pixels[...] = color`` store was lost), and the loop-invariant figure height
    was recomputed for every pixel — it is now hoisted out of the loops.
    """
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # the figure keeps the image's aspect ratio; invariant across pixels
    figure_height = figure_width / image_width * image_height

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    # (the result was previously bound to a throwaway name while `img.show()` read `img`)
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
| 84 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class a_(ProcessorMixin):
    """CLIP processor: wraps a CLIP image processor and a CLIP tokenizer into one
    callable that prepares text and/or images for the model.

    Restores the attribute/method names that ProcessorMixin and callers rely on
    (``attributes``, ``image_processor_class``, ``batch_decode`` …) and re-binds the
    locals (``encoding``, ``image_features``, ``feature_extractor``) that the
    obfuscated version referenced without ever assigning.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # fall back to the deprecated argument when `image_processor` is not given
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize ``text`` and/or preprocess ``images``; at least one must be given.

        Returns a BatchEncoding containing input_ids/attention_mask and/or pixel_values.
        """
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # union of tokenizer and image-processor input names, de-duplicated, order preserved
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 84 | 1 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join

# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_addoption(parser):
    """pytest hook: register diffusers' shared CLI options (e.g. --make-reports).

    Pytest discovers hooks by their exact name, so the obfuscated name ``a_``
    meant the hook was never invoked.
    """
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    """pytest hook: after the session, write the detailed reports requested via --make-reports."""
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 84 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
# Metric documentation constants. All three were assigned to the same name `_a`
# while the Metric class below reads _DESCRIPTION / _KWARGS_DESCRIPTION / _CITATION.
_CITATION = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
_DESCRIPTION = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
_KWARGS_DESCRIPTION = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n    predictions: list of generated text to score. Each predictions\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\nOptional Args:\n    num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n    pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n    kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n    kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n    kmeans_max_iter: maximum number of k-means iterations. Default 500\n    featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n    device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n    max_text_length: maximum number of tokens to consider. Default 1024\n    divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n    mauve_scaling_factor: "c" from the paper. Default 5.\n    verbose: If True (default), print running time updates\n    seed: random seed to initialize k-means cluster assignments.\nReturns:\n    mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n    frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n    divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n    p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n    q_hist: same as above, but with q_text.\nExamples:\n\n    >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n    >>> import datasets\n    >>> mauve = datasets.load_metric(\'mauve\')\n    >>> predictions = ["hello there", "general kenobi"]\n    >>> references = ["hello there", "general kenobi"]\n    >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n    >>> print(out.mauve) # doctest: +SKIP\n    1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class a_(datasets.Metric):
    """`datasets.Metric` wrapper around the official `mauve-text` implementation.

    ``datasets.Metric`` dispatches to ``_info`` and ``_compute``; in the obfuscated
    version both methods shared one name (the second shadowed the first), so the
    canonical method names are restored.
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/krishnap25/mauve",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/krishnap25/mauve"],
            reference_urls=[
                "https://arxiv.org/abs/2102.01454",
                "https://github.com/krishnap25/mauve",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1_024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        # delegate to the reference implementation from the `mauve-text` package
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
| 84 | 1 |
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# module logger: `main()` below reads this as `logger`, so the obfuscated `_a` binding is fixed
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def random_subsample(wav, max_length, sample_rate=16_000):
    """Randomly sample chunks of ``max_length`` seconds from the input audio.

    Audio shorter than (or exactly) the requested window is returned unchanged;
    otherwise a uniformly random window of ``sample_rate * max_length`` samples
    is sliced out. Renamed from the obfuscated ``a_`` to the name `main()` calls.
    """
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval.

    Restored from the obfuscated version in which every field was named ``A__``
    (duplicate class attributes) and optional defaults referenced an undefined name;
    the class name matches the ``HfArgumentParser((ModelArguments, DataTrainingArguments,
    TrainingArguments))`` call in ``main()``.
    """

    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/feature extractor we fine-tune from.

    Restored field names (previously all ``A__``) and the ``__post_init__`` hook
    (previously anonymized), which handles the deprecated
    ``--freeze_feature_extractor`` flag.
    """

    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        # map the deprecated --freeze_feature_extractor flag onto the new one,
        # erroring out on contradictory combinations
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder`"
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`."
                "Only make use of `--freeze_feature_encoder`."
            )
def main():
    """Fine-tune and evaluate an audio-classification model.

    Restored from the obfuscated version: every local was bound to a throwaway name
    while later statements read ``parser``, ``model_args``, ``training_args``,
    ``raw_datasets`` etc., and the ``__main__`` guard called ``main()`` while the
    function itself was named ``a_``. The wrong ``-> str`` annotation (the function
    returns nothing) is dropped.
    """
    # See all possible arguments in src/transformers/training_args.py, or pass a single .json file.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to train from scratch."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    if data_args.audio_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--audio_column_name` to the correct audio column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    if data_args.label_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--label_column_name` to the correct text column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]

    def train_transforms(batch):
        """Apply train_transforms (random cropping) across a batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    def val_transforms(batch):
        """Apply val_transforms (no cropping) across a batch."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
| 84 |
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def a_ ( __magic_name__ ) -> List[Any]:
"""simple docstring"""
if "cls_token" in name:
snake_case : Tuple = name.replace('''cls_token''' , '''vit.embeddings.cls_token''' )
if "mask_token" in name:
snake_case : Optional[int] = name.replace('''mask_token''' , '''decoder.mask_token''' )
if "decoder_pos_embed" in name:
snake_case : List[str] = name.replace('''decoder_pos_embed''' , '''decoder.decoder_pos_embed''' )
if "pos_embed" in name and "decoder" not in name:
snake_case : List[str] = name.replace('''pos_embed''' , '''vit.embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
snake_case : List[Any] = name.replace('''patch_embed.proj''' , '''vit.embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
snake_case : int = name.replace('''patch_embed.norm''' , '''vit.embeddings.norm''' )
if "decoder_blocks" in name:
snake_case : int = name.replace('''decoder_blocks''' , '''decoder.decoder_layers''' )
if "blocks" in name:
snake_case : Optional[Any] = name.replace('''blocks''' , '''vit.encoder.layer''' )
if "attn.proj" in name:
snake_case : str = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
snake_case : Any = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
snake_case : List[str] = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
snake_case : Tuple = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
snake_case : Tuple = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
snake_case : Tuple = name.replace('''mlp.fc2''' , '''output.dense''' )
if "decoder_embed" in name:
snake_case : Dict = name.replace('''decoder_embed''' , '''decoder.decoder_embed''' )
if "decoder_norm" in name:
snake_case : Dict = name.replace('''decoder_norm''' , '''decoder.decoder_norm''' )
if "decoder_pred" in name:
snake_case : Dict = name.replace('''decoder_pred''' , '''decoder.decoder_pred''' )
if "norm.weight" in name and "decoder" not in name:
snake_case : Optional[int] = name.replace('''norm.weight''' , '''vit.layernorm.weight''' )
if "norm.bias" in name and "decoder" not in name:
snake_case : List[str] = name.replace('''norm.bias''' , '''vit.layernorm.bias''' )
return name
def a_ ( orig_state_dict , config ) -> dict:
    """Split fused qkv projection weights of an original ViT-MAE checkpoint into
    separate query/key/value tensors under HuggingFace-style key names.

    Fixes vs. the previous revision: the two parameters shared one name (a
    SyntaxError), the popped value was never re-inserted, and the per-projection
    dictionary writes had been lost.

    Args:
        orig_state_dict: checkpoint state dict; mutated in place and returned.
        config: model config providing `hidden_size` / `decoder_hidden_size`.

    Returns:
        The converted state dict.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split('''.''')
            layer_num = int(key_split[1])
            # Decoder layers use the (smaller) decoder width; encoder layers the model width.
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = '''decoder.decoder_layers.'''
            else:
                dim = config.hidden_size
                prefix = '''vit.encoder.layer.'''
            # Fused tensor is stacked [query; key; value] along the first axis.
            if "weight" in key:
                orig_state_dict[F"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[F"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[F"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[F"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[F"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[F"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            # NOTE(review): upstream routes non-qkv keys through the rename
            # helper defined above (also named `a_` here) — confirm binding.
            orig_state_dict[key] = val
    return orig_state_dict
def a_ ( checkpoint_url , pytorch_dump_folder_path ) -> Any:
    """Download an original ViT-MAE checkpoint, convert it to the HuggingFace
    format, verify a logits slice against known values, and save the model and
    image processor.

    Fixes vs. the previous revision: duplicated parameter names (SyntaxError),
    config attribute assignments lost to throwaway locals, placeholder call
    arguments, and a redundant second `ViTMAEImageProcessor` instantiation.

    Args:
        checkpoint_url: URL of the original MAE `.pth` checkpoint.
        pytorch_dump_folder_path: output directory for the converted model.
    """
    config = ViTMAEConfig()
    # Model size is inferred from the checkpoint URL; the defaults are ViT-Base.
    if "large" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1_280
        config.intermediate_size = 5_120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='''cpu''')['''model''']
    # NOTE(review): upstream this is the qkv-splitting converter defined above
    # (named `a_` here) — confirm the intended binding.
    new_state_dict = convert_state_dict(state_dict, config)
    model.load_state_dict(new_state_dict)
    model.eval()

    image_processor = ViTMAEImageProcessor(size=config.image_size)

    url = '''https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg'''
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors='''pt''')

    # forward pass — seed fixed because ViT-MAE samples a random patch mask
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]])
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]])
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]])

    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(F"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(F"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # Build the CLI parser. The parsed namespace is bound to `_a` (file
    # convention) and read from it below; the previous revision referenced
    # undefined names `parser` and `args`.
    _a : Tuple = argparse.ArgumentParser()
    # Required parameters
    _a.add_argument(
        '--checkpoint_url',
        default='https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth',
        type=str,
        help='URL of the checkpoint you\'d like to convert.',
    )
    _a.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    _a = _a.parse_args()
    # NOTE(review): `convert_vit_mae_checkpoint` is not defined under that name
    # in this file (the conversion function above is named `a_`) — confirm the
    # intended binding before running.
    convert_vit_mae_checkpoint(_a.checkpoint_url, _a.pytorch_dump_folder_path)
| 84 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class a_ ( a ):
    """Pipeline tool that captions an image with BLIP: preprocess the image,
    generate caption token ids, and decode them to an English sentence.

    NOTE(review): the class attributes below all rebind the same name `A__`,
    so only the last assignment survives at runtime; upstream these are
    distinct fields (checkpoint, description, tool name, model class, inputs,
    outputs). Likewise the three methods share one name and shadow each other
    (upstream: encode / forward / decode). Confirm the intended names.
    """
    A__ : List[Any] = 'Salesforce/blip-image-captioning-base'
    A__ : Dict = (
        'This is a tool that generates a description of an image. It takes an input named `image` which should be the '
        'image to caption, and returns a text that contains the description in English.'
    )
    A__ : str = 'image_captioner'
    A__ : Dict = AutoModelForVisionaSeq
    A__ : Optional[Any] = ['image']
    A__ : List[str] = ['text']

    def __init__( self : List[str] , *args : Union[str, Any] , **kwargs : Union[str, Any] ):
        """Verify the vision backend is available, then defer to the base tool.

        The previous revision reused one name for both *args and **kwargs,
        which is a SyntaxError.
        """
        requires_backends(self , ['''vision'''] )
        super().__init__(*args , **kwargs )

    def lowerCAmelCase( self : Optional[int] , image : "Image" ):
        """Preprocess the input image into model-ready tensors."""
        return self.pre_processor(images=image , return_tensors='''pt''' )

    def lowerCAmelCase( self : Any , inputs : Union[str, Any] ):
        """Run caption generation on the preprocessed inputs."""
        return self.model.generate(**inputs )

    def lowerCAmelCase( self : Optional[Any] , outputs : List[Any] ):
        """Decode generated token ids into a clean caption string."""
        return self.pre_processor.batch_decode(outputs , skip_special_tokens=True )[0].strip()
| 84 |
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_a : Optional[Any] = 16
_a : Union[str, Any] = 32
def a_ ( accelerator , batch_size = 16 ):
    """Build train/eval dataloaders for GLUE MRPC tokenized with bert-base-cased.

    Fixes vs. the previous revision: the two parameters shared one name (a
    SyntaxError) and the locals the body relied on (`tokenizer`, `datasets`,
    padding settings, …) were lost to throwaway assignments.

    Args:
        accelerator: the `Accelerator`, used for process ordering and padding
            decisions (TPU / mixed-precision).
        batch_size: per-device batch size for both dataloaders.

    Returns:
        `(train_dataloader, eval_dataloader)`.
    """
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''' )
    datasets = load_dataset('''glue''' , '''mrpc''' )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''' , '''labels''' )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding='''longest''' , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors='''pt''' , )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    # NOTE(review): upstream this rebinds the dataloader factory so tests use
    # mocked data; here the mock is assigned to `_a`, which nothing else reads —
    # confirm the intended binding.
    _a : Optional[int] = mocked_dataloaders # noqa: F811
def a_ ( config , args ):
    """Train and evaluate bert-base-cased on GLUE MRPC, automatically lowering
    the batch size on out-of-memory errors via `find_executable_batch_size`.

    Fixes vs. the previous revision: the two parameters shared one name (a
    SyntaxError) and every hyper-parameter/model/optimizer local was lost to
    throwaway assignments.

    Args:
        config: dict with keys ``lr``, ``num_epochs``, ``seed``, ``batch_size``.
        args: parsed CLI namespace providing ``cpu`` and ``mixed_precision``.
    """
    # For testing only: shrink the run when mocked dataloaders are requested.
    if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , None ) == "1":
        config['''num_epochs'''] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''] )
    seed = int(config['''seed'''] )
    batch_size = int(config['''batch_size'''] )
    metric = evaluate.load('''glue''' , '''mrpc''' )

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size )
    def inner_training_loop(batch_size ):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()
        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=True )
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device )
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters() , lr=lr )
        # NOTE(review): `get_dataloaders` is not bound under that name in this
        # file (the factory above is named `a_`) — confirm before running.
        train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size )
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) , )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
        # Now we train the model
        for epoch in range(num_epochs ):
            model.train()
            for step, batch in enumerate(train_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                outputs = model(**batch )
                loss = outputs.loss
                accelerator.backward(loss )
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                with torch.no_grad():
                    outputs = model(**batch )
                predictions = outputs.logits.argmax(dim=-1 )
                predictions, references = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
                metric.add_batch(
                    predictions=predictions , references=references , )
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(F"epoch {epoch}:" , eval_metric )

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def a_ ( ) -> Union[str, Any]:
    """Parse CLI options and launch training with default hyper-parameters.

    Fixes vs. the previous revision: the parsed namespace and config dict were
    lost to throwaway locals, and the training call used placeholder arguments.
    """
    parser = argparse.ArgumentParser(description='''Simple example of training script.''' )
    parser.add_argument(
        '''--mixed_precision''' , type=str , default=None , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''' , )
    parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
    args = parser.parse_args()
    config = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    # NOTE(review): `training_function` is not bound under that name in this
    # file (the trainer above is named `a_`) — confirm before running.
    training_function(config , args )
# Script entry point.
# NOTE(review): `main` is not defined under that name in this file (the function
# above is named `a_`) — confirm the intended binding before running.
if __name__ == "__main__":
    main()
| 84 | 1 |
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def a_ ( tax_checkpoint_path , config_name , flax_dump_folder_path ) -> Optional[int]:
    """Convert a T5X (T5 / LongT5) checkpoint into a Flax seq2seq model and save it.

    Fixes vs. the previous revision: the three parameters shared one name (a
    SyntaxError), every `tax_*` / `flax_*` local was lost to throwaway
    assignments, and the attention-name branch raised for plain T5 because the
    first `if` was not part of the `elif` chain.

    NOTE(review): the exact Flax parameter key paths (`"0"/"1"/"2"` sub-layer
    indices, `DenseReluDense`, `global_input_layer_norm`,
    `global_relative_attention_bias`) are reconstructed from the upstream
    conversion script — confirm against the installed transformers version.

    Args:
        tax_checkpoint_path: path to the T5X checkpoint.
        config_name: HF config name/path used to instantiate the Flax model.
        flax_dump_folder_path: output directory for the converted model.
    """
    config = AutoConfig.from_pretrained(tax_checkpoint_path if config_name is None else config_name )
    config = AutoConfig.from_pretrained(config_name )
    flax_model = FlaxAutoModelForSeqaSeqLM.from_config(config=config )
    tax_model = checkpoints.load_tax_checkpoint(tax_checkpoint_path )

    # v1.1 / LongT5 checkpoints use a gated MLP with two input projections.
    split_mlp_wi = '''wi_0''' in tax_model['''target''']['''encoder''']['''layers_0''']['''mlp''']

    if config.model_type == "t5":
        encoder_attn_name = '''SelfAttention'''
    elif config.model_type == "longt5" and config.encoder_attention_type == "local":
        encoder_attn_name = '''LocalSelfAttention'''
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        encoder_attn_name = '''TransientGlobalSelfAttention'''
    else:
        raise ValueError(
            '''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`'''
            ''' attribute with a value from [\'local\', \'transient-global].''' )

    # Encoder
    for layer_index in range(config.num_layers ):
        layer_name = F"layers_{str(layer_index )}"

        # Self-Attention
        tax_attention_key = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel''']
        tax_attention_out = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel''']
        tax_attention_query = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel''']
        tax_attention_value = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel''']

        # Global input layer norm (transient-global attention only)
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            tax_global_layer_norm = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale''']

        # Layer Normalization
        tax_attention_layer_norm = tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale''']

        if split_mlp_wi:
            tax_mlp_wi_0 = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
            tax_mlp_wi_1 = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
        else:
            tax_mlp_wi = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']

        tax_mlp_wo = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']

        # Layer Normalization
        tax_mlp_layer_norm = tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']

        # Assigning
        flax_model_encoder_layer_block = flax_model.params['''encoder''']['''block'''][str(layer_index )]['''layer''']
        flax_model_encoder_layer_block['''0'''][encoder_attn_name]['''k''']['''kernel'''] = tax_attention_key
        flax_model_encoder_layer_block['''0'''][encoder_attn_name]['''o''']['''kernel'''] = tax_attention_out
        flax_model_encoder_layer_block['''0'''][encoder_attn_name]['''q''']['''kernel'''] = tax_attention_query
        flax_model_encoder_layer_block['''0'''][encoder_attn_name]['''v''']['''kernel'''] = tax_attention_value
        flax_model_encoder_layer_block['''0''']['''layer_norm''']['''weight'''] = tax_attention_layer_norm

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            flax_model_encoder_layer_block['''0'''][encoder_attn_name]['''global_input_layer_norm''']['''weight'''] = tax_global_layer_norm

        if split_mlp_wi:
            flax_model_encoder_layer_block['''1''']['''DenseReluDense''']['''wi_0''']['''kernel'''] = tax_mlp_wi_0
            flax_model_encoder_layer_block['''1''']['''DenseReluDense''']['''wi_1''']['''kernel'''] = tax_mlp_wi_1
        else:
            flax_model_encoder_layer_block['''1''']['''DenseReluDense''']['''wi''']['''kernel'''] = tax_mlp_wi

        flax_model_encoder_layer_block['''1''']['''DenseReluDense''']['''wo''']['''kernel'''] = tax_mlp_wo
        flax_model_encoder_layer_block['''1''']['''layer_norm''']['''weight'''] = tax_mlp_layer_norm

        flax_model.params['''encoder''']['''block'''][str(layer_index )]['''layer'''] = flax_model_encoder_layer_block

    # Only for layer 0:
    tax_encoder_rel_embedding = tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T
    flax_model.params['''encoder''']['''block''']['''0''']['''layer''']['''0'''][encoder_attn_name]['''relative_attention_bias''']['''embedding'''] = tax_encoder_rel_embedding

    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        tax_encoder_global_rel_embedding = tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T
        flax_model.params['''encoder''']['''block''']['''0''']['''layer''']['''0'''][encoder_attn_name]['''global_relative_attention_bias''']['''embedding'''] = tax_encoder_global_rel_embedding

    # Assigning
    tax_encoder_norm = tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale''']
    flax_model.params['''encoder''']['''final_layer_norm''']['''weight'''] = tax_encoder_norm

    # Decoder
    for layer_index in range(config.num_layers ):
        layer_name = F"layers_{str(layer_index )}"

        # Self-Attention
        tax_attention_key = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel''']
        tax_attention_out = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel''']
        tax_attention_query = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel''']
        tax_attention_value = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel''']

        # Layer Normalization
        tax_pre_attention_layer_norm = tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][
            '''scale'''
        ]

        # Encoder-Decoder-Attention
        tax_enc_dec_attention_module = tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention''']
        tax_enc_dec_attention_key = tax_enc_dec_attention_module['''key''']['''kernel''']
        tax_enc_dec_attention_out = tax_enc_dec_attention_module['''out''']['''kernel''']
        tax_enc_dec_attention_query = tax_enc_dec_attention_module['''query''']['''kernel''']
        tax_enc_dec_attention_value = tax_enc_dec_attention_module['''value''']['''kernel''']

        # Layer Normalization
        tax_cross_layer_norm = tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale''']

        # MLP
        if split_mlp_wi:
            tax_mlp_wi_0 = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
            tax_mlp_wi_1 = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
        else:
            tax_mlp_wi = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']

        tax_mlp_wo = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']

        # Layer Normalization
        tax_mlp_layer_norm = tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']

        # Assigning
        flax_model_decoder_layer_block = flax_model.params['''decoder''']['''block'''][str(layer_index )]['''layer''']
        flax_model_decoder_layer_block['''0''']['''SelfAttention''']['''k''']['''kernel'''] = tax_attention_key
        flax_model_decoder_layer_block['''0''']['''SelfAttention''']['''o''']['''kernel'''] = tax_attention_out
        flax_model_decoder_layer_block['''0''']['''SelfAttention''']['''q''']['''kernel'''] = tax_attention_query
        flax_model_decoder_layer_block['''0''']['''SelfAttention''']['''v''']['''kernel'''] = tax_attention_value
        flax_model_decoder_layer_block['''0''']['''layer_norm''']['''weight'''] = tax_pre_attention_layer_norm
        flax_model_decoder_layer_block['''1''']['''EncDecAttention''']['''k''']['''kernel'''] = tax_enc_dec_attention_key
        flax_model_decoder_layer_block['''1''']['''EncDecAttention''']['''o''']['''kernel'''] = tax_enc_dec_attention_out
        flax_model_decoder_layer_block['''1''']['''EncDecAttention''']['''q''']['''kernel'''] = tax_enc_dec_attention_query
        flax_model_decoder_layer_block['''1''']['''EncDecAttention''']['''v''']['''kernel'''] = tax_enc_dec_attention_value
        flax_model_decoder_layer_block['''1''']['''layer_norm''']['''weight'''] = tax_cross_layer_norm

        if split_mlp_wi:
            flax_model_decoder_layer_block['''2''']['''DenseReluDense''']['''wi_0''']['''kernel'''] = tax_mlp_wi_0
            flax_model_decoder_layer_block['''2''']['''DenseReluDense''']['''wi_1''']['''kernel'''] = tax_mlp_wi_1
        else:
            flax_model_decoder_layer_block['''2''']['''DenseReluDense''']['''wi''']['''kernel'''] = tax_mlp_wi

        flax_model_decoder_layer_block['''2''']['''DenseReluDense''']['''wo''']['''kernel'''] = tax_mlp_wo
        flax_model_decoder_layer_block['''2''']['''layer_norm''']['''weight'''] = tax_mlp_layer_norm

        flax_model.params['''decoder''']['''block'''][str(layer_index )]['''layer'''] = flax_model_decoder_layer_block

    # Decoder Normalization
    tax_decoder_norm = tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale''']
    flax_model.params['''decoder''']['''final_layer_norm''']['''weight'''] = tax_decoder_norm

    # Only for layer 0:
    tax_decoder_rel_embedding = tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T
    flax_model.params['''decoder''']['''block''']['''0''']['''layer''']['''0''']['''SelfAttention''']['''relative_attention_bias''']['''embedding'''] = tax_decoder_rel_embedding

    # Token Embeddings
    tax_token_embeddings = tax_model['''target''']['''token_embedder''']['''embedding''']
    flax_model.params['''shared''']['''embedding'''] = tax_token_embeddings

    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in tax_model["target"]["decoder"]:
        flax_model.params['''lm_head''']['''kernel'''] = tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel''']

    flax_model.save_pretrained(flax_dump_folder_path )
    print('''T5X Model was sucessfully converted!''' )
if __name__ == "__main__":
    # Build the CLI parser; the previous revision bound the parser to `_a` but
    # then referenced undefined names `parser` and `args`, and read a
    # non-existent attribute `tax_checkpoint_path` (argparse exposes
    # `--t5x_checkpoint_path` as `t5x_checkpoint_path`).
    _a : Dict = argparse.ArgumentParser()
    # Required parameters
    _a.add_argument(
        '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path the T5X checkpoint.'
    )
    _a.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
    _a.add_argument(
        '--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
    )
    _a = _a.parse_args()
    # NOTE(review): `convert_tax_checkpoint_to_flax` is not defined under that
    # name in this file (the converter above is named `a_`) — confirm binding.
    convert_tax_checkpoint_to_flax(_a.t5x_checkpoint_path, _a.config_name, _a.flax_dump_folder_path)
| 84 |
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
_a : Dict = logging.get_logger(__name__)
def a_ ( __magic_name__ , __magic_name__ , __magic_name__ ) -> List[Any]:
"""simple docstring"""
return [
int(1_000 * (box[0] / width) ),
int(1_000 * (box[1] / height) ),
int(1_000 * (box[2] / width) ),
int(1_000 * (box[3] / height) ),
]
def a_ ( image , lang , tesseract_config = None ) -> str:
    """Run Tesseract OCR on `image` and return the recognized words together
    with their bounding boxes normalized to 0–1000 coordinates.

    Fixes vs. the previous revision: the three parameters shared one name (a
    SyntaxError) and the OCR result locals (`pil_image`, `words`, coordinate
    lists) were lost to throwaway assignments.

    Args:
        image: input image (any format accepted by `to_pil_image`).
        lang: optional Tesseract language code.
        tesseract_config: extra Tesseract CLI flags; defaults to "".

    Returns:
        `(words, normalized_boxes)` of equal length.
    """
    tesseract_config = tesseract_config if tesseract_config is not None else ''''''

    # apply OCR
    pil_image = to_pil_image(image )
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image , lang=lang , output_type='''dict''' , config=tesseract_config )
    words, left, top, width, height = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words ) if not word.strip()]
    words = [word for idx, word in enumerate(words ) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left ) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top ) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width ) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height ) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left , top , width , height ):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box )

    # finally, normalize the bounding boxes
    # NOTE(review): upstream this is the box normalizer defined above (named
    # `a_` here) — confirm the intended binding.
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box , image_width , image_height ) )

    assert len(words ) == len(normalized_boxes ), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class a_ ( a ):
    """Image processor for LayoutLM-style models: optionally resizes images,
    optionally runs Tesseract OCR to extract words and normalized bounding
    boxes, and returns channel-first pixel values flipped to BGR order (as
    Detectron2 requires).

    Fixes vs. the previous revision: every method reused one parameter name (a
    SyntaxError), the `__init__` configuration was never stored on `self`, and
    the `resize` method was shadowed by a second method of the same name even
    though the batch method calls `self.resize`.
    """

    A__ : int = ['pixel_values']

    def __init__( self : Optional[int] , do_resize : bool = True , size : Dict[str, int] = None , resample : PILImageResampling = PILImageResampling.BILINEAR , apply_ocr : bool = True , ocr_lang : Optional[str] = None , tesseract_config : Optional[str] = "" , **kwargs : int , ):
        """Store the default preprocessing configuration."""
        super().__init__(**kwargs )
        size = size if size is not None else {'''height''': 224, '''width''': 224}
        size = get_size_dict(size )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize( self : List[str] , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PILImageResampling.BILINEAR , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : Any , ):
        """Resize `image` to `size`, a dict with "height" and "width" keys."""
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}" )
        output_size = (size['''height'''], size['''width'''])
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )

    def lowerCAmelCase( self : Tuple , images : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , resample : PILImageResampling = None , apply_ocr : bool = None , ocr_lang : Optional[str] = None , tesseract_config : Optional[str] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : ChannelDimension = ChannelDimension.FIRST , **kwargs : List[Any] , ):
        """Preprocess a batch of images (upstream name: `preprocess`): resize,
        optionally OCR, flip RGB→BGR and package into a `BatchFeature` with
        optional `words`/`boxes` entries.
        """
        # Fall back to the instance-level defaults for any unset option.
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]

        if apply_ocr:
            requires_backends(self , '''pytesseract''' )
            words_batch = []
            boxes_batch = []
            for image in images:
                # NOTE(review): upstream this is the OCR helper defined above
                # (named `a_` here) — confirm the intended binding.
                words, boxes = apply_tesseract(image , ocr_lang , tesseract_config )
                words_batch.append(words )
                boxes_batch.append(boxes )

        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]

        data = BatchFeature(data={'''pixel_values''': images} , tensor_type=return_tensors )
        if apply_ocr:
            data['''words'''] = words_batch
            data['''boxes'''] = boxes_batch
        return data
| 84 | 1 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def a_ ( result , args ) -> Dict:
    """Compute WER/CER for an evaluated dataset, print them, and write them to
    `<dataset_id>_eval_results.txt`; optionally also log per-example
    predictions and targets.

    Fixes vs. the previous revision: the two parameters shared one name (a
    SyntaxError) and the metric results were lost to throwaway locals.

    Args:
        result: dataset with "target" and "prediction" columns.
        args: parsed CLI namespace with `log_outputs`, `dataset`, `config`, `split`.
    """
    log_outputs = args.log_outputs
    dataset_id = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] )

    # load metric
    wer = load_metric('''wer''' )
    cer = load_metric('''cer''' )

    # compute metrics
    wer_result = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
    cer_result = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] )

    # print & log results
    result_str = F"WER: {wer_result}\nCER: {cer_result}"
    print(result_str )
    with open(F"{dataset_id}_eval_results.txt" , '''w''' ) as f:
        f.write(result_str )

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = F"log_{dataset_id}_predictions.txt"
        target_file = F"log_{dataset_id}_targets.txt"
        with open(pred_file , '''w''' ) as p, open(target_file , '''w''' ) as t:
            # mapping function to write output
            def write_to_file(batch , i ):
                p.write(F"{i}" + '''\n''' )
                p.write(batch['''prediction'''] + '''\n''' )
                t.write(F"{i}" + '''\n''' )
                t.write(batch['''target'''] + '''\n''' )

            result.map(write_to_file , with_indices=True )
def a_ ( __magic_name__ ) -> str:
    """Normalize a transcription: lowercase, strip ignored punctuation, collapse newlines.

    Fixes vs. the generated code: it passed the input string as the regex
    *pattern* and then read an undefined name `text`; the pattern/local
    bindings are restored here.
    """
    chars_to_ignore_regex = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex , '''''' , __magic_name__.lower() )
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ['''\n\n''', '''\n''', ''' ''', ''' ''']
    for t in token_sequences_to_ignore:
        text = ''' '''.join(text.split(t ) )
    return text
def a_ ( __magic_name__ ) -> List[str]:
    """Run ASR evaluation of `args.model_id` on the configured dataset split.

    Loads the dataset, resamples audio to the model's sampling rate, runs the
    transformers ASR pipeline over every example, and hands the resulting
    prediction/target columns to the logging helper.

    Fixes vs. the generated code: local bindings (`feature_extractor`, `asr`,
    `prediction`) were discarded into `snake_case` locals, and the per-example
    prediction/target were never written back into `batch`.
    """
    args = __magic_name__
    dataset = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=True )
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id )
    sampling_rate = feature_extractor.sampling_rate
    # resample audio
    dataset = dataset.cast_column('''audio''' , Audio(sampling_rate=sampling_rate ) )
    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device )
    # map function to decode audio
    def map_to_pred(batch ):
        prediction = asr(
            batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
        batch['''prediction'''] = prediction['''text''']
        # NOTE(review): `normalize_text` / `log_results` are not defined under
        # those names in this generated file (the helpers above are all named
        # `a_`); restore the real helper names when de-mangling the module.
        batch['''target'''] = normalize_text(batch['''sentence'''] )
        return batch
    # run inference on all examples
    result = dataset.map(map_to_pred , remove_columns=dataset.column_names )
    # compute and log_results
    # do not change function below
    log_results(result , args )
if __name__ == "__main__":
    # CLI for the ASR evaluation script above.
    # NOTE(review): generation artifacts — the parser and parsed namespace are
    # bound to `_a`, yet `parser`, `args` and `main` are referenced below;
    # presumably these were `parser = ...` / `args = parser.parse_args()` /
    # the eval entry point originally. TODO confirm against the upstream script.
    _a : Any = argparse.ArgumentParser()
    parser.add_argument(
        '--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
    )
    parser.add_argument(
        '--dataset',
        type=str,
        required=True,
        help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
    )
    parser.add_argument(
        '--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
    )
    parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
    parser.add_argument(
        '--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.'
    )
    parser.add_argument(
        '--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.'
    )
    parser.add_argument(
        '--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
    )
    parser.add_argument(
        '--device',
        type=int,
        default=None,
        help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.',
    )
    _a : Any = parser.parse_args()
    main(args)
| 84 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class a_ :
    """Test helper that builds an ASTConfig plus dummy spectrogram inputs.

    NOTE(review): this block is machine-mangled and left byte-identical here:
    - `__init__` declares every parameter with the same name
      `UpperCAmelCase__`, which is a SyntaxError in Python;
    - the constructor binds values to throwaway annotated locals named
      `snake_case` instead of `self.<attr>`, yet later code reads
      `self.num_mel_bins`, `self.patch_size`, etc.;
    - all subsequent methods share the single name `lowerCAmelCase`, so only
      the last definition would survive, and `self.prepare_config_and_inputs`
      / `self.get_config` do not exist under those names.
    Presumably this was the upstream ASTModelTester — confirm before de-mangling.
    """

    def __init__( self : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any]=13 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : List[Any]=24 , UpperCAmelCase__ : Union[str, Any]=16 , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : str=True , UpperCAmelCase__ : int=32 , UpperCAmelCase__ : Tuple=5 , UpperCAmelCase__ : Dict=4 , UpperCAmelCase__ : Optional[int]=37 , UpperCAmelCase__ : Optional[int]="gelu" , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Optional[int]=10 , UpperCAmelCase__ : Any=0.02 , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : List[Any]=2 , UpperCAmelCase__ : Optional[Any]=2 , ):
        """Store tester hyper-parameters and derive the expected sequence length."""
        # NOTE(review): each line below presumably should read `self.<name> = <param>`.
        snake_case : Tuple = parent
        snake_case : Dict = batch_size
        snake_case : str = patch_size
        snake_case : Union[str, Any] = max_length
        snake_case : str = num_mel_bins
        snake_case : Any = is_training
        snake_case : Union[str, Any] = use_labels
        snake_case : Tuple = hidden_size
        snake_case : Dict = num_hidden_layers
        snake_case : Any = num_attention_heads
        snake_case : Any = intermediate_size
        snake_case : List[Any] = hidden_act
        snake_case : str = hidden_dropout_prob
        snake_case : str = attention_probs_dropout_prob
        snake_case : str = type_sequence_label_size
        snake_case : Optional[int] = initializer_range
        snake_case : str = scope
        snake_case : int = frequency_stride
        snake_case : Union[str, Any] = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        snake_case : Optional[int] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        snake_case : Any = (self.max_length - self.patch_size) // self.time_stride + 1
        snake_case : Union[str, Any] = frequency_out_dimension * time_out_dimension
        snake_case : Union[str, Any] = num_patches + 2

    def lowerCAmelCase( self : Union[str, Any] ):
        """Build (config, input_values, labels); labels only when use_labels is set."""
        snake_case : Optional[int] = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
        snake_case : str = None
        if self.use_labels:
            snake_case : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        snake_case : List[str] = self.get_config()
        return config, input_values, labels

    def lowerCAmelCase( self : Any ):
        """Return an ASTConfig mirroring the tester hyper-parameters."""
        return ASTConfig(
            patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )

    def lowerCAmelCase( self : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int] ):
        """Instantiate ASTModel, run a forward pass and check the output shape."""
        # NOTE(review): duplicate parameter names above are a SyntaxError.
        snake_case : str = ASTModel(config=UpperCAmelCase__ )
        model.to(UpperCAmelCase__ )
        model.eval()
        snake_case : Any = model(UpperCAmelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def lowerCAmelCase( self : Optional[Any] ):
        """Return (config, inputs_dict) for the common model tests."""
        snake_case : int = self.prepare_config_and_inputs()
        # NOTE(review): annotated tuple unpacking below is a SyntaxError and the
        # unpack targets were lost; presumably `config, input_values, labels = config_and_inputs`.
        (
            (
                snake_case
            ) , (
                snake_case
            ) , (
                snake_case
            ) ,
        ) : int = config_and_inputs
        snake_case : Tuple = {'''input_values''': input_values}
        return config, inputs_dict
@require_torch
class a_ ( a , a , unittest.TestCase ):
    """Common + pipeline test suite for the AST model family.

    NOTE(review): machine-mangled, left byte-identical:
    - the base classes are the undefined name `a` (presumably
      ModelTesterMixin and PipelineTesterMixin from the imports above);
    - every class attribute is named `A__`, so later assignments overwrite
      earlier ones and only the last survives;
    - several methods share the name `lowerCAmelCase` and some declare
      duplicate `UpperCAmelCase__` parameters (a SyntaxError);
    - locals such as `model_tester` / `config_tester` are discarded into
      `snake_case`, yet `self.model_tester` / `self.config_tester` are read later.
    """

    # NOTE(review): presumably all_model_classes / pipeline_model_mapping /
    # fx_compatible / test_pruning / test_resize_embeddings / test_head_masking.
    A__ : List[Any] = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    A__ : int = (
        {'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel}
        if is_torch_available()
        else {}
    )
    A__ : Optional[Any] = False
    A__ : Dict = False
    A__ : int = False
    A__ : Optional[int] = False

    def lowerCAmelCase( self : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int ):
        """Skip audio-classification pipeline tests (duplicate params above are a SyntaxError)."""
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False

    def lowerCAmelCase( self : Optional[Any] ):
        """Set up the model/config testers."""
        snake_case : Optional[int] = ASTModelTester(self )
        snake_case : Optional[int] = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37 )

    def lowerCAmelCase( self : List[str] ):
        """Run the shared configuration checks."""
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''AST does not use inputs_embeds''' )
    def lowerCAmelCase( self : Tuple ):
        """Intentionally skipped: AST has no inputs_embeds."""
        pass

    def lowerCAmelCase( self : Dict ):
        """Input embeddings exist and output embeddings are absent or Linear."""
        snake_case , snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case : Optional[Any] = model_class(UpperCAmelCase__ )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            snake_case : Any = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(UpperCAmelCase__ , nn.Linear ) )

    def lowerCAmelCase( self : Dict ):
        """First positional forward argument must be `input_values`."""
        snake_case , snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case : Any = model_class(UpperCAmelCase__ )
            snake_case : Any = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            snake_case : str = [*signature.parameters.keys()]
            snake_case : List[str] = ['''input_values''']
            self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )

    def lowerCAmelCase( self : Dict ):
        """Forward-pass shape check via the model tester."""
        snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCAmelCase__ )

    @slow
    def lowerCAmelCase( self : List[str] ):
        """Smoke-test loading a pretrained checkpoint from the hub."""
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            snake_case : List[str] = ASTModel.from_pretrained(UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
def a_ ( ) -> Dict:
    """Download the sample flac from the Hub and return (waveform, sampling_rate).

    Fixes vs. the generated code, which discarded the downloaded file path
    into a throwaway local and then called `torchaudio.load` on the undefined
    name `__magic_name__` (this function takes no arguments).
    """
    filepath = hf_hub_download(
        repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' )
    audio, sampling_rate = torchaudio.load(filepath )
    return audio, sampling_rate
@require_torch
@require_torchaudio
class a_ ( unittest.TestCase ):
    """Slow integration test: run the finetuned AudioSet AST checkpoint end-to-end.

    NOTE(review): machine-mangled, left byte-identical — both methods are
    named `lowerCAmelCase` (so only the last survives), and locals such as
    the model, inputs and audio are discarded into `snake_case`, yet
    `self.default_feature_extractor`, `model(**...)`, `outputs`, etc. are
    read later. Presumably the upstream ASTModelIntegrationTest — confirm.
    """

    @cached_property
    def lowerCAmelCase( self : Any ):
        """Feature extractor for the AudioSet checkpoint (None without torchaudio)."""
        return (
            ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' )
            if is_torchaudio_available()
            else None
        )

    @slow
    def lowerCAmelCase( self : Tuple ):
        """Forward a sample clip through the classifier and verify logits shape/values."""
        snake_case : List[str] = self.default_feature_extractor
        snake_case : str = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(UpperCAmelCase__ )
        snake_case : str = self.default_feature_extractor
        snake_case , snake_case : int = prepare_audio()
        snake_case : Optional[int] = audio.squeeze().numpy()
        snake_case : Optional[Any] = feature_extractor(UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , return_tensors='''pt''' ).to(UpperCAmelCase__ )
        # forward pass
        with torch.no_grad():
            snake_case : Union[str, Any] = model(**UpperCAmelCase__ )
        # verify the logits
        snake_case : Any = torch.Size((1, 527) )
        self.assertEqual(outputs.logits.shape , UpperCAmelCase__ )
        snake_case : str = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(UpperCAmelCase__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4 ) )
| 84 | 1 |
import argparse
from collections import defaultdict
import yaml
_a : int = 'docs/source/en/_toctree.yml'
def a_ ( __magic_name__ ) -> List[Any]:
    """Deduplicate one model-doc ToC section and return it sorted by title.

    Each entry is a dict with "local" (doc path) and "title" keys. Entries
    sharing a "local" must all carry the same title, otherwise a ValueError
    is raised; duplicates are collapsed into a single entry.

    Fixes vs. the generated code: `counts` is a `defaultdict(int)` (it was
    seeded with the input list), the input is bound to `model_doc`, the
    title-conflict check inspects `titles` (not the whole input), and the
    final sort runs on the deduplicated list with a lambda that actually
    uses its argument.
    """
    model_doc = __magic_name__
    counts = defaultdict(int )
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                F"{duplicate_key} is present several times in the documentation table of content at "
                '''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
                '''others.''' )
        # Only add this once
        new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] )
    # Sort
    return sorted(new_doc , key=lambda s : s["title"].lower() )
def a_ ( __magic_name__=False ) -> Optional[int]:
    """Check (and with overwrite=True, rewrite) the model section of the docs ToC.

    Walks `_a` (the `docs/source/en/_toctree.yml` path defined above), finds
    the "API" > "Models" section, cleans every modality sub-section, and
    either writes the fixed file back or raises if a diff was found.

    Fixes vs. the generated code: the parameter is bound to `overwrite`,
    all lost local bindings are restored, `open`/`yaml.dump` receive the
    path/content instead of the boolean flag, and `allow_unicode` is True.
    """
    overwrite = __magic_name__
    with open(_a , encoding='''utf-8''' ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['''sections''']
    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]['''sections''']
    # Extract the modality subsections (those that themselves have "sections")
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc ) if '''sections''' in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc['''sections''']
        # NOTE(review): the cleaning helper is defined above as `a_` in this
        # generated file; restore its real name when de-mangling the module.
        new_modality_doc = clean_model_doc_toc(old_modality_doc )
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]['''sections'''] = new_modality_doc
    if diff:
        if overwrite:
            api_doc[model_idx]['''sections'''] = model_doc
            content[api_idx]['''sections'''] = api_doc
            with open(_a , '''w''' , encoding='''utf-8''' ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
    # CLI: pass --fix_and_overwrite to rewrite the ToC instead of raising.
    # NOTE(review): generation artifacts — parser/namespace are bound to `_a`
    # while `parser`, `args` and `check_model_doc` are referenced; presumably
    # `parser = ...` / `args = parser.parse_args()` / the checker above.
    _a : List[str] = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    _a : Optional[int] = parser.parse_args()
    check_model_doc(args.fix_and_overwrite)
| 84 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_a : Union[str, Any] = logging.getLogger(__name__)
def a_ ( preds , labels ) -> float:
    """Return the fraction of `preds` equal to `labels` (element-wise numpy mean).

    Fixes vs. the generated code, which declared both parameters with the
    same name (a SyntaxError) and referenced the undefined names
    `preds`/`labels`; the intended positional parameters are restored.
    """
    return (preds == labels).mean()
@dataclass
class a_ :
    """Arguments selecting which pretrained model/config/tokenizer to fine-tune.

    NOTE(review): machine-mangled, left byte-identical — every field is named
    `A__`, so later declarations overwrite earlier ones and the dataclass
    keeps only the last field; `default=a` references an undefined
    module-level name `a` (presumably `None` originally). Presumably the
    fields were model_name_or_path / config_name / tokenizer_name / cache_dir
    upstream — confirm before de-mangling.
    """

    A__ : str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    A__ : Optional[str] = field(
        default=a , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    A__ : Optional[str] = field(
        default=a , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    A__ : Optional[str] = field(
        default=a , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class a_ :
    """Arguments configuring the multiple-choice task data.

    NOTE(review): machine-mangled, left byte-identical — all fields are named
    `A__` (only the last survives) and `default=a` references an undefined
    name `a`. Presumably task_name / data_dir / max_seq_length /
    overwrite_cache upstream — confirm before de-mangling.
    """

    A__ : str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys() )} )
    A__ : str = field(metadata={'help': 'Should contain the data files for the task.'} )
    A__ : int = field(
        default=128 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    A__ : bool = field(
        default=a , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def a_ ( ) -> Dict:
    """Train and evaluate a multiple-choice model (legacy SWAG-style runner).

    Parses (model, data, training) dataclass arguments from the command line,
    sets up logging/seed, loads model+tokenizer, builds train/eval datasets,
    runs the Trainer, and returns the evaluation results dict.

    Fixes vs. the generated code: every local binding had been discarded into
    throwaway `snake_case` names (restored from usage below), and
    `training_args.fpaa` is restored to the real `fp16` attribute.
    NOTE(review): the attribute names read off the two dataclasses above
    assume their upstream field names (model_name_or_path, data_dir, ...) —
    those dataclasses are themselves mangled; confirm when de-mangling.
    """
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            ''' --overwrite_output_dir to overcome.''' )
    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''' , training_args )
    # Set seed
    set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list )
    except KeyError:
        raise ValueError('''Task not found: %s''' % (data_args.task_name) )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p ) -> Dict:
        # NOTE(review): `simple_accuracy` is defined above as `a_` in this
        # generated file; restore its real name when de-mangling the module.
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(preds , p.label_ids )}

    # Data collator (pad to a multiple of 8 only for fp16 tensor-core efficiency)
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , '''eval_results.txt''' )
        if trainer.is_world_master():
            with open(output_eval_file , '''w''' ) as writer:
                logger.info('''***** Eval results *****''' )
                for key, value in result.items():
                    logger.info(''' %s = %s''' , key , value )
                    writer.write('''%s = %s\n''' % (key, value) )
            results.update(result )
    return results
def a_ ( __magic_name__ ) -> List[Any]:
    """xla_spawn (TPU) entry point: ignores its index argument and delegates.

    NOTE(review): `main` is not defined under that name in this generated
    file (the trainer entry point above is also named `a_`); restore the
    real name when de-mangling the module.
    """
    main()


if __name__ == "__main__":
    # NOTE(review): same undefined-`main` artifact as above.
    main()
| 84 | 1 |
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class a_ ( a ):
    """Unit tests for EulerDiscreteScheduler.

    NOTE(review): machine-mangled, left byte-identical:
    - the base class is the undefined name `a` (presumably
      SchedulerCommonTest from the import above);
    - both class attributes are named `A__`, so the second overwrites the
      first (presumably scheduler_classes and num_inference_steps);
    - every method is named `lowerCAmelCase`, so only the last definition
      would survive on the class;
    - locals (config, scheduler, model, sample, output, ...) are discarded
      into throwaway `snake_case` names, yet read on subsequent lines.
    """

    A__ : List[Any] = (EulerDiscreteScheduler,)
    A__ : int = 10

    def lowerCAmelCase( self : Tuple , **UpperCAmelCase__ : List[str] ):
        """Default scheduler config, updated with any overrides passed as kwargs."""
        snake_case : List[str] = {
            '''num_train_timesteps''': 1_100,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
        }
        config.update(**UpperCAmelCase__ )
        return config

    def lowerCAmelCase( self : List[str] ):
        """Config sweep over num_train_timesteps."""
        for timesteps in [10, 50, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=UpperCAmelCase__ )

    def lowerCAmelCase( self : Optional[int] ):
        """Config sweep over paired (beta_start, beta_end) values."""
        for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=UpperCAmelCase__ , beta_end=UpperCAmelCase__ )

    def lowerCAmelCase( self : Optional[Any] ):
        """Config sweep over beta schedules."""
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=UpperCAmelCase__ )

    def lowerCAmelCase( self : str ):
        """Config sweep over prediction types."""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=UpperCAmelCase__ )

    def lowerCAmelCase( self : Optional[Any] ):
        """Full denoising loop; checks sum/mean of the final sample (epsilon)."""
        snake_case : Optional[int] = self.scheduler_classes[0]
        snake_case : Union[str, Any] = self.get_scheduler_config()
        snake_case : Any = scheduler_class(**UpperCAmelCase__ )
        scheduler.set_timesteps(self.num_inference_steps )
        snake_case : List[str] = torch.manual_seed(0 )
        snake_case : int = self.dummy_model()
        snake_case : Any = self.dummy_sample_deter * scheduler.init_noise_sigma
        snake_case : int = sample.to(UpperCAmelCase__ )
        for i, t in enumerate(scheduler.timesteps ):
            snake_case : Tuple = scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ )
            snake_case : str = model(UpperCAmelCase__ , UpperCAmelCase__ )
            snake_case : Any = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , generator=UpperCAmelCase__ )
            snake_case : Tuple = output.prev_sample
        snake_case : Tuple = torch.sum(torch.abs(UpperCAmelCase__ ) )
        snake_case : List[str] = torch.mean(torch.abs(UpperCAmelCase__ ) )
        assert abs(result_sum.item() - 10.0807 ) < 1e-2
        assert abs(result_mean.item() - 0.0131 ) < 1e-3

    def lowerCAmelCase( self : str ):
        """Same loop under prediction_type="v_prediction"."""
        snake_case : int = self.scheduler_classes[0]
        snake_case : Tuple = self.get_scheduler_config(prediction_type='''v_prediction''' )
        snake_case : int = scheduler_class(**UpperCAmelCase__ )
        scheduler.set_timesteps(self.num_inference_steps )
        snake_case : Optional[int] = torch.manual_seed(0 )
        snake_case : int = self.dummy_model()
        snake_case : str = self.dummy_sample_deter * scheduler.init_noise_sigma
        snake_case : Any = sample.to(UpperCAmelCase__ )
        for i, t in enumerate(scheduler.timesteps ):
            snake_case : Tuple = scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ )
            snake_case : List[Any] = model(UpperCAmelCase__ , UpperCAmelCase__ )
            snake_case : Tuple = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , generator=UpperCAmelCase__ )
            snake_case : Optional[int] = output.prev_sample
        snake_case : str = torch.sum(torch.abs(UpperCAmelCase__ ) )
        snake_case : Dict = torch.mean(torch.abs(UpperCAmelCase__ ) )
        assert abs(result_sum.item() - 0.0002 ) < 1e-2
        assert abs(result_mean.item() - 2.2_6_7_6e-0_6 ) < 1e-3

    def lowerCAmelCase( self : List[str] ):
        """Same loop with timesteps placed on the target device."""
        snake_case : Optional[int] = self.scheduler_classes[0]
        snake_case : Optional[Any] = self.get_scheduler_config()
        snake_case : Tuple = scheduler_class(**UpperCAmelCase__ )
        scheduler.set_timesteps(self.num_inference_steps , device=UpperCAmelCase__ )
        snake_case : Union[str, Any] = torch.manual_seed(0 )
        snake_case : Dict = self.dummy_model()
        snake_case : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        snake_case : Optional[int] = sample.to(UpperCAmelCase__ )
        for t in scheduler.timesteps:
            snake_case : List[Any] = scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ )
            snake_case : Optional[int] = model(UpperCAmelCase__ , UpperCAmelCase__ )
            snake_case : Dict = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , generator=UpperCAmelCase__ )
            snake_case : str = output.prev_sample
        snake_case : Tuple = torch.sum(torch.abs(UpperCAmelCase__ ) )
        snake_case : List[str] = torch.mean(torch.abs(UpperCAmelCase__ ) )
        assert abs(result_sum.item() - 10.0807 ) < 1e-2
        assert abs(result_mean.item() - 0.0131 ) < 1e-3

    def lowerCAmelCase( self : Union[str, Any] ):
        """Same loop with Karras sigma schedule enabled."""
        snake_case : Optional[int] = self.scheduler_classes[0]
        snake_case : Union[str, Any] = self.get_scheduler_config()
        snake_case : Union[str, Any] = scheduler_class(**UpperCAmelCase__ , use_karras_sigmas=UpperCAmelCase__ )
        scheduler.set_timesteps(self.num_inference_steps , device=UpperCAmelCase__ )
        snake_case : Dict = torch.manual_seed(0 )
        snake_case : str = self.dummy_model()
        snake_case : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        snake_case : Dict = sample.to(UpperCAmelCase__ )
        for t in scheduler.timesteps:
            snake_case : List[str] = scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ )
            snake_case : Any = model(UpperCAmelCase__ , UpperCAmelCase__ )
            snake_case : Optional[int] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , generator=UpperCAmelCase__ )
            snake_case : Union[str, Any] = output.prev_sample
        snake_case : Optional[Any] = torch.sum(torch.abs(UpperCAmelCase__ ) )
        snake_case : Union[str, Any] = torch.mean(torch.abs(UpperCAmelCase__ ) )
        assert abs(result_sum.item() - 124.52_2994_9951_1719 ) < 1e-2
        assert abs(result_mean.item() - 0.1_6213_9326_3339_9963 ) < 1e-3
| 84 |
import re
def a_ ( __magic_name__ ) -> bool:
    """Validate a Sri Lankan mobile phone number.

    Accepted prefixes: 0, 94, +94 or 0094, followed by 7, one of the operator
    digits 0/1/2/4/5/6/7/8, an optional "-" or " " separator, and exactly
    seven digits.

    Fixes vs. the generated code, which compiled the pattern into a discarded
    local and then called `re.search(phone, phone)` — searching the phone
    number as a pattern within itself, which matched any input.
    """
    pattern = re.compile(
        R'''^(?:0|94|\+94|0{2}94)''' R'''7(0|1|2|4|5|6|7|8)''' R'''(-| |)''' R'''\d{7}$''' )
    return bool(pattern.search(__magic_name__ ) )
if __name__ == "__main__":
    # NOTE(review): generation artifacts — the sample number is bound to `_a`
    # while `phone` and `is_sri_lankan_phone_number` are referenced; presumably
    # `phone = '0094702343221'` and the validator above, originally.
    _a : Any = '0094702343221'
    print(is_sri_lankan_phone_number(phone))
| 84 | 1 |
from sklearn.metrics import mean_squared_error
import datasets
_a : Optional[int] = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
_a : List[str] = '\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n'
_a : Tuple = '\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n "raw_values" : Returns a full set of errors in case of multioutput input.\n\n "uniform_average" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric("mse")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {\'mse\': 0.6123724356957945}\n\n If you\'re using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric("mse", "multilist")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mse\': array([0.41666667, 1. ])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
    """Mean-squared-error metric wrapping sklearn.metrics.mean_squared_error.

    NOTE(review): machine-mangled, left byte-identical — all three methods
    share the name `lowerCAmelCase` (only the last survives; presumably
    _info / _get_feature_types / _compute upstream), the first calls
    `self._get_feature_types()` which does not exist under that name here,
    and `_compute` discards its result into `snake_case` while returning the
    undefined name `mse`.
    """

    def lowerCAmelCase( self : Optional[Any] ):
        """Metric metadata: description, citation, feature schema, references."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
                '''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'''
            ] , )

    def lowerCAmelCase( self : Union[str, Any] ):
        """Feature schema: sequences of floats for the "multilist" config, scalars otherwise."""
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value('''float''' ) ),
                "references": datasets.Sequence(datasets.Value('''float''' ) ),
            }
        else:
            return {
                "predictions": datasets.Value('''float''' ),
                "references": datasets.Value('''float''' ),
            }

    def lowerCAmelCase( self : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : str="uniform_average" , UpperCAmelCase__ : int=True ):
        """Delegate to sklearn's mean_squared_error and return {"mse": ...}."""
        snake_case : List[str] = mean_squared_error(
            UpperCAmelCase__ , UpperCAmelCase__ , sample_weight=UpperCAmelCase__ , multioutput=UpperCAmelCase__ , squared=UpperCAmelCase__ )
        return {"mse": mse}
| 84 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class a_ ( unittest.TestCase ):
    """Configuration holder used by the LayoutLMv3 image-processor tests below.

    Fixes vs. the generated code: `__init__` declared every parameter with
    the same name `UpperCAmelCase__` (a SyntaxError) and bound the values to
    throwaway locals instead of instance attributes. Parameter names are
    restored from the attribute reads in `prepare_image_processor_dict` and
    the sibling test class; defaults are unchanged.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        # Default target size used by the image processor under test.
        size = size if size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def lowerCAmelCase( self : int ):
        """Return the kwargs dict used to instantiate the image processor."""
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class a_ ( a , unittest.TestCase ):
    """Tests for ``LayoutLMvaImageProcessor``: resizing plus the optional
    Tesseract OCR pass that returns words and normalized boxes.

    Fixes from review: every method was named ``lowerCAmelCase`` and shadowed
    the previous one (only the last survived), and locals were assigned to a
    throwaway name and then read under their original names (NameError).
    """

    # processor class under test (``None`` when pytesseract is missing)
    A__ : List[Any] = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        # NOTE(review): the tester class is bound to the name ``a_`` earlier in
        # this module, which this class re-binds — ``LayoutLMvaImageProcessingTester``
        # must exist elsewhere for this call to resolve; confirm.
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self )

    @property
    def image_processor_dict(self):
        # the tester's kwargs builder is named ``lowerCAmelCase`` in this file
        return self.image_processor_tester.lowerCAmelCase()

    def test_image_processor_properties(self):
        # NOTE(review): ``image_processing_class`` is assumed to be derived from
        # ``A__`` by the mixin base ``a`` — confirm against the mixin.
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing , '''size''' ) )
        self.assertTrue(hasattr(image_processing , '''apply_ocr''' ) )

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )

        # an explicit ``size`` kwarg must override the dict value
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )

    def test_batch_feature(self):
        # intentionally empty override — appears meant to skip the common-suite
        # check for this processor (NOTE(review): confirm intent)
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )

        # Test not batched input
        encoding = image_processing(image_inputs[0] , return_tensors='''pt''' )
        self.assertEqual(
            encoding.pixel_values.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        # OCR output accompanies the pixel values
        self.assertIsInstance(encoding.words , list )
        self.assertIsInstance(encoding.boxes , list )

        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )

        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )

        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )

    def test_integration(self):
        # with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()

        from datasets import load_dataset

        ds = load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' )

        image = Image.open(ds[0]['''file'''] ).convert('''RGB''' )

        encoding = image_processing(image , return_tensors='''pt''' )

        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
        self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        expected_words = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', 
        '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
        expected_boxes = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 
        447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
        # fmt: on

        self.assertListEqual(encoding.words , expected_words )
        self.assertListEqual(encoding.boxes , expected_boxes )

        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False )
        encoding = image_processing(image , return_tensors='''pt''' )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 84 | 1 |
def a_ ( input_str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    """Return True if *input_str* uses every letter a-z at least once.

    Fixes from review: the parameter was named differently from the ``input_str``
    the body reads, and the letter set was assigned to a throwaway name while
    the loop appended to an undefined ``frequency`` — both NameErrors.
    """
    frequency = set()
    # drop spaces (letters are filtered below anyway, so this is just a shortcut)
    input_str = input_str.replace(''' ''' , '''''' )
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower() )
    return len(frequency ) == 26
def a_ ( input_str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    """Pangram check using a 26-entry flag list (the 'faster' variant).

    Fixes from review: the flag list was never indexed (each branch rebound a
    throwaway local instead of setting ``flag[...] = True``) and the function
    returned ``all(input_str)``, which is True for any non-empty string.
    """
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char ) - ord('''a''' )] = True
        elif char.isupper():
            flag[ord(char ) - ord('''A''' )] = True
    return all(flag )
def a_ ( input_str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    """Pangram check via a set comprehension (the 'fastest' variant).

    Fix from review: the parameter name did not match the ``input_str`` the
    body reads, so any call raised NameError.
    """
    return len({char for char in input_str.lower() if char.isalpha()} ) == 26
def a_ ( ) -> None:
    """Benchmark the three pangram implementations with ``timeit``.

    NOTE(review): ``setup=__magic_name__`` references an undefined name (the
    setup string is bound to a different local), and the setup string imports
    ``is_pangram*`` names that this module never defines (all three checkers
    are named ``a_`` here) — this function raises at runtime as written.
    """
    from timeit import timeit

    # setup string intended for the timeit calls below
    snake_case : Tuple = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'''
    print(timeit('''is_pangram()''' , setup=__magic_name__ ) )
    print(timeit('''is_pangram_faster()''' , setup=__magic_name__ ) )
    print(timeit('''is_pangram_fastest()''' , setup=__magic_name__ ) )
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Fix from review: `benchmark` is not defined in this module; the benchmark
    # routine above is the last function bound to the name `a_`.
    a_()
| 84 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_a : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
_a : Dict = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... 
prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def a_ ( height , width , scale_factor=8 ):
    """Ceil-divide each dimension by ``scale_factor**2`` and scale the result
    back up by ``scale_factor`` (latent-grid sizing for the MoVQ decoder).

    Fixes from review: the original signature repeated one parameter name
    (a SyntaxError), the accumulators ``new_height``/``new_width`` were never
    bound, and the return annotation claimed ``str`` for a tuple result.

    Returns:
        tuple: ``(new_height * scale_factor, new_width * scale_factor)``.
    """
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class a_ ( a ):
    """Kandinsky 2.2 ControlNet decoder pipeline: denoises MoVQ latents with a
    UNet conditioned on image embeddings plus a ControlNet ``hint`` tensor.

    Fixes from review: every method was named ``lowerCAmelCase`` and shadowed
    the previous one; methods are renamed to match their internal call sites
    (``prepare_latents``, ``_execution_device``, ``movq_scale_factor``), and
    locals that were assigned to a throwaway name are bound properly.
    """

    def __init__(self, unet: UNetaDConditionModel, scheduler: DDPMScheduler, movq: VQModel, ):
        super().__init__()

        self.register_modules(
            unet=unet , scheduler=scheduler , movq=movq , )
        # factor between pixel resolution and MoVQ latent resolution
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        """Draw initial noise (or validate user-supplied latents) and scale by
        the scheduler's initial sigma."""
        if latents is None:
            latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        else:
            if latents.shape != shape:
                raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}" )
            latents = latents.to(device )

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0 ):
        """Offload unet and movq to CPU via accelerate's ``cpu_offload``."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('''Please install accelerate via `pip install accelerate`''' )

        device = torch.device(F"cuda:{gpu_id}" )

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )

    def enable_model_cpu_offload(self, gpu_id=0 ):
        """Hook-based CPU offload: keeps one model on GPU at a time."""
        if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )

        device = torch.device(F"cuda:{gpu_id}" )

        if self.device.type != "cpu":
            self.to('''cpu''' , silence_dtype_warnings=True )
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model , device , prev_module_hook=hook )

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self ):
        """Device on which the unet executes (accelerate hooks considered)."""
        if not hasattr(self.unet , '''_hf_hook''' ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module , '''_hf_hook''' )
                and hasattr(module._hf_hook , '''execution_device''' )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device

    @torch.no_grad()
    # NOTE(review): the usage-example docstring constant is bound to the
    # module-level name `_a` in this file.
    @replace_example_docstring(_a )
    def __call__(self, image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], hint: torch.FloatTensor, height: int = 512, width: int = 512, num_inference_steps: int = 100, guidance_scale: float = 4.0, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, ):
        """Run the denoising loop and decode the final latents to images."""
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        # allow lists of per-prompt embeddings/hints
        if isinstance(image_embeds , list ):
            image_embeds = torch.cat(image_embeds , dim=0 )
        if isinstance(negative_image_embeds , list ):
            negative_image_embeds = torch.cat(negative_image_embeds , dim=0 )
        if isinstance(hint , list ):
            hint = torch.cat(hint , dim=0 )

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            hint = hint.repeat_interleave(num_images_per_prompt , dim=0 )

            image_embeds = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=device )
            hint = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=device )

        self.scheduler.set_timesteps(num_inference_steps , device=device )
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels

        # NOTE(review): `downscale_height_and_width` is not defined under that
        # name in this module (the helper is bound to `a_`, which this class
        # re-binds) — this call raises NameError at runtime; left as-is because
        # it cannot be repaired locally.
        height, width = downscale_height_and_width(height , width , self.movq_scale_factor )

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , device , generator , latents , self.scheduler , )

        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents

            added_cond_kwargs = {'''image_embeds''': image_embeds, '''hint''': hint}
            # NOTE(review): upstream Kandinsky 2.2 passes no text encoder states;
            # the original obfuscated call site used an undefined name here.
            noise_pred = self.unet(
                sample=latent_model_input , timestep=t , encoder_hidden_states=None , added_cond_kwargs=added_cond_kwargs , return_dict=False , )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1] , dim=1 )
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2 )
                _, variance_pred_text = variance_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text] , dim=1 )

            if not (
                hasattr(self.scheduler.config , '''variance_type''' )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1] , dim=1 )

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred , t , latents , generator=generator , )[0]
        # post-processing
        image = self.movq.decode(latents , force_not_quantize=True )['''sample''']

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0 , 1 )
            image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()

            if output_type == "pil":
                image = self.numpy_to_pil(image )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image )
| 84 | 1 |
def a_ ( n , allow_probable=False ) -> bool:
    """Deterministic Miller-Rabin primality test for ``n < 3.3e24``.

    Fixes from review: the original signature repeated one parameter name
    (a SyntaxError) and the loop/witness variables (``bounds``, ``plist``,
    ``d``, ``s``, ``prime``, ``pr``) were never bound — ``enumerate``/``range``
    /``pow`` were even applied to ``n`` itself.

    Args:
        n: integer to test.
        allow_probable: permit a probabilistic answer above the deterministic
            bound instead of raising ``ValueError``.
    """
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
        raise ValueError(
            '''Warning: upper bound of deterministic test is exceeded. '''
            '''Pass allow_probable=True to allow probabilistic test. '''
            '''A return value of True indicates a probable prime.''' )
    # array bounds provided by analysis: below bounds[i], the first i+1 primes
    # are sufficient witnesses
    bounds = [
        2_047,
        1_373_653,
        25_326_001,
        3_215_031_751,
        2_152_302_898_747,
        3_474_749_660_383,
        341_550_071_728_321,
        1,
        3_825_123_056_546_413_051,
        1,
        1,
        318_665_857_834_031_151_167_461,
        3_317_044_064_679_887_385_961_981,
    ]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds , 1 ):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n - 1 into a power of 2 (s) and remaining odd component:
    # solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s ):
            m = pow(prime , d * 2**r , n )
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False
    return True
def a_ ( ) -> None:
    """Assertion-based self-test probing each deterministic Miller-Rabin bound.

    NOTE(review): ``miller_rabin`` is not defined in this module — the checker
    above is bound to ``a_``, a name this very definition re-binds — so this
    function raises NameError if run as-is; it cannot be repaired locally.
    """
    assert not miller_rabin(561 )
    assert miller_rabin(563 )
    # 2047
    assert not miller_rabin(838_201 )
    assert miller_rabin(838_207 )
    # 1_373_653
    assert not miller_rabin(17_316_001 )
    assert miller_rabin(17_316_017 )
    # 25_326_001
    assert not miller_rabin(3_078_386_641 )
    assert miller_rabin(3_078_386_653 )
    # 3_215_031_751
    assert not miller_rabin(1_713_045_574_801 )
    assert miller_rabin(1_713_045_574_819 )
    # 2_152_302_898_747
    assert not miller_rabin(2_779_799_728_307 )
    assert miller_rabin(2_779_799_728_327 )
    # 3_474_749_660_383
    assert not miller_rabin(113_850_023_909_441 )
    assert miller_rabin(113_850_023_909_527 )
    # 341_550_071_728_321
    assert not miller_rabin(1_275_041_018_848_804_351 )
    assert miller_rabin(1_275_041_018_848_804_391 )
    # 3_825_123_056_546_413_051
    assert not miller_rabin(79_666_464_458_507_787_791_867 )
    assert miller_rabin(79_666_464_458_507_787_791_951 )
    # 318_665_857_834_031_151_167_461
    assert not miller_rabin(552_840_677_446_647_897_660_333 )
    assert miller_rabin(552_840_677_446_647_897_660_359 )
    # 3_317_044_064_679_887_385_961_981
    # upper limit for probabilistic test
if __name__ == "__main__":
    # Fix from review: `test_miller_rabin` is not defined in this module; the
    # self-test routine above is the last function bound to the name `a_`.
    a_()
| 84 |
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_a : List[Any] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class a_ ( a , unittest.TestCase ):
A__ : Dict = ReformerTokenizer
A__ : Optional[int] = ReformerTokenizerFast
A__ : str = True
A__ : Tuple = False
A__ : str = True
def lowerCAmelCase( self : List[Any] ):
"""simple docstring"""
super().setUp()
snake_case : str = ReformerTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase( self : Any ):
"""simple docstring"""
snake_case : int = '''<s>'''
snake_case : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ )
def lowerCAmelCase( self : Optional[Any] ):
"""simple docstring"""
snake_case : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(UpperCAmelCase__ ) , 1_000 )
def lowerCAmelCase( self : List[Any] ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
def lowerCAmelCase( self : Dict ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
snake_case : Any = self.get_tokenizer()
snake_case : str = self.get_rust_tokenizer()
snake_case : Tuple = '''I was born in 92000, and this is falsé.'''
snake_case : str = tokenizer.tokenize(UpperCAmelCase__ )
snake_case : int = rust_tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
snake_case : Union[str, Any] = tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
snake_case : List[str] = rust_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
snake_case : List[str] = self.get_rust_tokenizer()
snake_case : Optional[int] = tokenizer.encode(UpperCAmelCase__ )
snake_case : Optional[Any] = rust_tokenizer.encode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCAmelCase( self : Dict , UpperCAmelCase__ : List[Any]=15 ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case : str = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
# Simple input
snake_case : Union[str, Any] = '''This is a simple input'''
snake_case : List[str] = ['''This is a simple input 1''', '''This is a simple input 2''']
snake_case : int = ('''This is a simple input''', '''This is a pair''')
snake_case : int = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Simple input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Simple input
self.assertRaises(
UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' , )
# Pair input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Pair input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Pair input
self.assertRaises(
UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' , )
    def lowerCAmelCase( self : str ):
        """Empty override.

        NOTE(review): appears intended to skip one of the common tokenizer
        suite's tests for Reformer — confirm which test this replaces.
        """
        pass
def lowerCAmelCase( self : Union[str, Any] ):
"""simple docstring"""
snake_case : Union[str, Any] = ReformerTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ )
snake_case : List[str] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [285, 46, 10, 170, 382] , )
snake_case : int = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
UpperCAmelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
snake_case : int = tokenizer.convert_tokens_to_ids(UpperCAmelCase__ )
self.assertListEqual(
UpperCAmelCase__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
snake_case : List[Any] = tokenizer.convert_ids_to_tokens(UpperCAmelCase__ )
self.assertListEqual(
UpperCAmelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def lowerCAmelCase( self : Tuple ):
"""simple docstring"""
return ReformerTokenizer.from_pretrained('''google/reformer-crime-and-punishment''' )
@slow
def lowerCAmelCase( self : List[str] ):
    """Pin the big tokenizer's encoding of a simple ASCII string.

    NOTE(review): the original asserted on the undefined name
    ``UpperCAmelCase__`` (NameError); restored to the obvious bindings.
    """
    symbols = '''Hello World!'''
    original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]
    self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
def lowerCAmelCase( self : Optional[Any] ):
    """Pin the big tokenizer's encoding of a long string with odd characters.

    NOTE(review): the original asserted on the undefined name
    ``UpperCAmelCase__`` (NameError); restored to the obvious bindings.
    Unknown characters/words map to id 0 (<unk>).
    """
    symbols = (
        '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
        ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
    )
    original_tokenizer_encodings = [
        108, 265, 24, 111, 4, 258, 156, 35, 28, 275, 3, 259, 297, 260, 84, 4, 35, 110,
        44, 8, 259, 91, 268, 21, 11, 209, 274, 109, 266, 277, 117, 86, 93, 315, 258,
        278, 258, 277, 258, 0, 258, 288, 258, 319, 258, 0, 258, 0, 258, 0, 258, 0, 258,
        287, 258, 315, 258, 289, 258, 278, 99, 269, 266, 262, 8, 259, 241, 4, 217, 230,
        268, 266, 55, 168, 106, 75, 193, 266, 223, 27, 49, 26, 282, 25, 264, 299, 19,
        26, 0, 258, 277, 117, 86, 93, 176, 183, 270, 11, 262, 42, 61, 265,
    ]
    self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@require_torch
@slow
def lowerCAmelCase( self : List[Any] ):
    """Feed tokenizer output straight into a ReformerModel forward pass.

    NOTE(review): the original bound every local to ``snake_case`` and called
    the model with the undefined ``UpperCAmelCase__`` (NameError), and never
    actually assigned the axial position shape onto the config; restored per
    the upstream test.
    """
    import torch

    from transformers import ReformerConfig, ReformerModel

    # Build a short sequence from the first ten vocab entries.
    first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
    sequence = ''' '''.join(first_ten_tokens)
    encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors='''pt''')
    batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors='''pt''')

    config = ReformerConfig()
    # The input gets padded during training so adjust the axial position encodings
    # from the pretrained model value of (512, 1024) to the actual input shape.
    config.axial_pos_shape = encoded_sequence['''input_ids'''].shape
    model = ReformerModel(config)

    # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320;
    # len(tokenizer) is 321 (including a pad token with id 320)
    assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

    with torch.no_grad():
        model(**encoded_sequence)
        model(**batch_encoded_sequence)
@slow
def lowerCAmelCase( self : Optional[int] ):
    """Run the shared tokenizer-integration check against a pinned revision.

    NOTE(review): the original passed the undefined ``UpperCAmelCase__`` for
    ``expected_encoding``/``padding``/``sequences`` (NameError); restored to
    the obvious bindings (upstream uses ``padding=False``).
    """
    # fmt: off
    expected_encoding = {'''input_ids''': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
    # fmt: on

    # This tokenizer does not know some characters like ")".
    # That is the reason why we use very simple texts here.
    # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
    sequences = [
        '''This is a very simple sentence.''',
        '''The quick brown fox jumps over the lazy dog.''',
    ]
    self.tokenizer_integration_test_util(
        expected_encoding=expected_encoding,
        model_name='''google/reformer-crime-and-punishment''',
        revision='''0e6c3decb8211d49bf881013425dc8b0448b3f5a''',
        padding=False,
        sequences=sequences,
    )
| 84 | 1 |
import torch
from diffusers import DiffusionPipeline
class a_ ( a ):
    """Minimal custom DiffusionPipeline used for testing pipeline loading.

    NOTE(review): the original ``__init__`` declared two parameters both named
    ``UpperCAmelCase__`` (a SyntaxError) and ``__call__`` read undefined names;
    restored per the upstream custom-pipeline test fixture.
    """

    def __init__( self : Optional[Any] , unet : Tuple , scheduler : List[Any] ):
        """Register the denoising ``unet`` and noise ``scheduler`` submodules."""
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__( self : Optional[int] ):
        """Run a single dummy denoising step and return an all-ones tensor of the result's shape."""
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1
        scheduler_output = self.unet(image, timestep).sample
        scheduler_output = self.scheduler.step(scheduler_output, timestep, image).prev_sample
        # Cancels to zero, then adds ones: the output is deterministic regardless of the model.
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)
        return result
| 84 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def a_ ( __magic_name__ ) -> torch.Tensor:
    """Resize a PIL image down to a multiple of 32 and convert it to a
    ``(1, C, H, W)`` float tensor scaled to ``[-1, 1]``.

    NOTE(review): the original annotated a tuple-unpack target (SyntaxError),
    bound locals to ``snake_case`` while later lines read ``w``/``h``/``image``
    (NameError), and used the non-existent ``np.floataa``; restored per the
    upstream LDM super-resolution ``preprocess`` helper.
    """
    w, h = __magic_name__.size
    w, h = (x - x % 32 for x in (w, h))  # round down to integer multiple of 32
    image = __magic_name__.resize((w, h), resample=PIL_INTERPOLATION['''lanczos'''])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)  # HWC -> NCHW
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class a_ ( a ):
    """LDM super-resolution pipeline: upscales an image via latent diffusion
    with a VQ-VAE decoder.

    NOTE(review): the original ``__init__``/``__call__`` signatures repeated
    the parameter name ``UpperCAmelCase__`` (a SyntaxError) and the bodies
    bound locals to ``snake_case`` while reading the original names
    (NameError); reconstructed per the upstream ``LDMSuperResolutionPipeline``.
    """

    def __init__( self : Optional[Any] , vqvae : VQModel , unet : UNetaDModel , scheduler : Union[
        DDIMScheduler,
        PNDMScheduler,
        LMSDiscreteScheduler,
        EulerDiscreteScheduler,
        EulerAncestralDiscreteScheduler,
        DPMSolverMultistepScheduler,
    ] , ):
        """Register the VQ-VAE, the denoising U-Net and the noise scheduler."""
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__( self : Any , image : Union[torch.Tensor, PIL.Image.Image] = None , batch_size : Optional[int] = 1 , num_inference_steps : Optional[int] = 100 , eta : Optional[float] = 0.0 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , ):
        """Denoise latents conditioned on the low-resolution ``image`` and decode them.

        Returns an ``ImagePipelineOutput`` (or a one-tuple when ``return_dict`` is False).
        Raises ``ValueError`` for unsupported ``image`` types.
        """
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(F"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = '''eta''' in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs['''eta'''] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 84 | 1 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a_ ( a , unittest.TestCase ):
    """Tokenization tests for Longformer (slow and fast tokenizers).

    NOTE(review): this file is machine-obfuscated — locals are bound to the
    throwaway name ``snake_case`` while later statements read the original
    names (``tokens``, ``encoded``, ``kwargs`` …) or the undefined
    ``UpperCAmelCase__``, so most methods raise NameError if executed.  The
    code is kept byte-identical below; restore the bindings from the upstream
    test file before running.
    """

    # Mixin configuration: classes under test and whether slow/fast variants run.
    A__ : List[Any] = LongformerTokenizer
    A__ : Union[str, Any] = True
    A__ : Optional[Any] = LongformerTokenizerFast
    A__ : Union[str, Any] = True

    def lowerCAmelCase( self : str ):
        """Write a tiny BPE vocab/merges fixture into the temp dir."""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        snake_case : int = [
            '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''',
            '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''',
            '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''',
        ]
        snake_case : str = dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) )
        snake_case : Optional[Any] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        snake_case : List[Any] = {'''unk_token''': '''<unk>'''}
        snake_case : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        snake_case : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(UpperCAmelCase__ ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(UpperCAmelCase__ ) )

    def lowerCAmelCase( self : Dict , **UpperCAmelCase__ : Optional[int] ):
        """Build a slow tokenizer over the temp-dir fixture."""
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )

    def lowerCAmelCase( self : Any , **UpperCAmelCase__ : Tuple ):
        """Build a fast (Rust) tokenizer over the temp-dir fixture."""
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )

    def lowerCAmelCase( self : Any , UpperCAmelCase__ : Any ):
        """Return (input_text, output_text) for the shared round-trip test."""
        snake_case : List[str] = '''lower newer'''
        snake_case : Optional[int] = '''lower newer'''
        return input_text, output_text

    def lowerCAmelCase( self : List[str] ):
        """Tokenize 'lower newer' and check tokens and ids against the fixture vocab."""
        snake_case : Optional[int] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
        snake_case : Tuple = '''lower newer'''
        snake_case : List[Any] = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
        snake_case : List[Any] = tokenizer.tokenize(UpperCAmelCase__ )  # , add_prefix_space=True)
        self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
        snake_case : Optional[int] = tokens + [tokenizer.unk_token]
        snake_case : Union[str, Any] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , UpperCAmelCase__ )

    def lowerCAmelCase( self : List[Any] ):
        """Pin encodings of two short strings (ids from the pretrained vocab)."""
        snake_case : Tuple = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=UpperCAmelCase__ ) , [0, 31_414, 232, 328, 2] )
        self.assertListEqual(
            tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=UpperCAmelCase__ ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , )

    @slow
    def lowerCAmelCase( self : List[Any] ):
        """Check build_inputs_with_special_tokens matches encode() for single and pair inputs."""
        snake_case : Any = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
        snake_case : Union[str, Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCAmelCase__ )
        snake_case : str = tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCAmelCase__ )
        snake_case : str = tokenizer.encode(
            '''sequence builders''' , add_special_tokens=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ )
        snake_case : Union[str, Any] = tokenizer.encode(
            '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ )
        snake_case : int = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ )
        snake_case : List[Any] = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def lowerCAmelCase( self : str ):
        """Verify space handling around special tokens (notably <mask>, which keeps a left space)."""
        snake_case : List[str] = self.get_tokenizer()
        snake_case : Optional[int] = '''Encode this sequence.'''
        snake_case : int = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
        # Testing encoder arguments
        snake_case : Dict = tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ )
        snake_case : int = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertNotEqual(UpperCAmelCase__ , UpperCAmelCase__ )
        snake_case : Any = tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ )
        snake_case : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
        tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
        snake_case : Optional[Any] = tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
        snake_case : List[Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
        self.assertNotEqual(UpperCAmelCase__ , UpperCAmelCase__ )
        # Testing spaces after special tokens
        snake_case : Any = '''<mask>'''
        tokenizer.add_special_tokens(
            {'''mask_token''': AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ )} )  # mask token has a left space
        snake_case : Any = tokenizer.convert_tokens_to_ids(UpperCAmelCase__ )
        snake_case : Dict = '''Encode <mask> sequence'''
        snake_case : str = '''Encode <mask>sequence'''
        snake_case : int = tokenizer.encode(UpperCAmelCase__ )
        snake_case : List[Any] = encoded.index(UpperCAmelCase__ )
        snake_case : int = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
        snake_case : str = tokenizer.encode(UpperCAmelCase__ )
        snake_case : Dict = encoded.index(UpperCAmelCase__ )
        snake_case : int = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertNotEqual(UpperCAmelCase__ , UpperCAmelCase__ )

    def lowerCAmelCase( self : Optional[Any] ):
        """Intentionally empty: presumably disables an inherited mixin test — TODO confirm."""
        pass

    def lowerCAmelCase( self : Optional[Any] ):
        """Compare slow vs fast tokenizers on a sentence containing <mask>."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                snake_case : Any = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
                snake_case : Union[str, Any] = self.tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
                snake_case : List[Any] = '''A, <mask> AllenNLP sentence.'''
                snake_case : Any = tokenizer_r.encode_plus(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ )
                snake_case : Dict = tokenizer_p.encode_plus(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ )
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
                snake_case : List[str] = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
                snake_case : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(
                    UpperCAmelCase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
                self.assertSequenceEqual(
                    UpperCAmelCase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )

    def lowerCAmelCase( self : int ):
        """Check add_prefix_space / trim_offsets are reflected in the serialized tokenizer state."""
        for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
            snake_case : List[str] = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname , use_fast=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ )
            snake_case : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
            snake_case : str = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
            self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , UpperCAmelCase__ )
            self.assertEqual(post_processor_state['''add_prefix_space'''] , UpperCAmelCase__ )
            self.assertEqual(post_processor_state['''trim_offsets'''] , UpperCAmelCase__ )

    def lowerCAmelCase( self : Optional[int] ):
        """Exhaustively check offset mappings for every add_prefix_space / trim_offsets combination."""
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                snake_case : Dict = '''hello'''  # `hello` is a token in the vocabulary of `pretrained_name`
                snake_case : Optional[Any] = F"{text_of_1_token} {text_of_1_token}"
                snake_case : Any = self.rust_tokenizer_class.from_pretrained(
                    UpperCAmelCase__ , use_fast=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ )
                snake_case : Dict = tokenizer_r(UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCAmelCase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(UpperCAmelCase__ ) + 1, len(UpperCAmelCase__ ) + 1 + len(UpperCAmelCase__ )) , )
                snake_case : Tuple = self.rust_tokenizer_class.from_pretrained(
                    UpperCAmelCase__ , use_fast=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ )
                snake_case : str = tokenizer_r(UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCAmelCase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(UpperCAmelCase__ ) + 1, len(UpperCAmelCase__ ) + 1 + len(UpperCAmelCase__ )) , )
                snake_case : List[str] = self.rust_tokenizer_class.from_pretrained(
                    UpperCAmelCase__ , use_fast=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ )
                snake_case : str = tokenizer_r(UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCAmelCase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(UpperCAmelCase__ ), len(UpperCAmelCase__ ) + 1 + len(UpperCAmelCase__ )) , )
                snake_case : int = self.rust_tokenizer_class.from_pretrained(
                    UpperCAmelCase__ , use_fast=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ )
                snake_case : Tuple = tokenizer_r(UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCAmelCase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(UpperCAmelCase__ ), len(UpperCAmelCase__ ) + 1 + len(UpperCAmelCase__ )) , )
                snake_case : Any = F" {text}"
                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )
                snake_case : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
                    UpperCAmelCase__ , use_fast=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ )
                snake_case : int = tokenizer_r(UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(UpperCAmelCase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(UpperCAmelCase__ ) + 1, 1 + len(UpperCAmelCase__ ) + 1 + len(UpperCAmelCase__ )) , )
                snake_case : List[Any] = self.rust_tokenizer_class.from_pretrained(
                    UpperCAmelCase__ , use_fast=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ )
                snake_case : Dict = tokenizer_r(UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCAmelCase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(UpperCAmelCase__ ), 1 + len(UpperCAmelCase__ ) + 1 + len(UpperCAmelCase__ )) , )
                snake_case : int = self.rust_tokenizer_class.from_pretrained(
                    UpperCAmelCase__ , use_fast=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ )
                snake_case : Dict = tokenizer_r(UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCAmelCase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(UpperCAmelCase__ ), 1 + len(UpperCAmelCase__ ) + 1 + len(UpperCAmelCase__ )) , )
| 84 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class a_ ( a ):
    """Tests for RealmRetriever against a tiny in-memory block-records fixture.

    NOTE(review): this file is machine-obfuscated — locals are bound to the
    throwaway name ``snake_case`` while later statements read the original
    names or the undefined ``UpperCAmelCase__``; additionally the annotated
    tuple-unpack lines (``snake_case , snake_case , … : List[str] = …``) are a
    SyntaxError.  Code kept byte-identical; restore from the upstream test
    file before running.
    """

    def lowerCAmelCase( self : List[Any] ):
        """Create a temp dir with a tiny WordPiece vocab and an (empty) block-records dir."""
        snake_case : List[Any] = tempfile.mkdtemp()
        snake_case : Dict = 5
        # Realm tok
        snake_case : str = [
            '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''test''', '''question''',
            '''this''', '''is''', '''the''', '''first''', '''second''', '''third''', '''fourth''',
            '''fifth''', '''record''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''',
            '''runn''', '''##ing''', ''',''', '''low''', '''lowest''',
        ]
        snake_case : Tuple = os.path.join(self.tmpdirname , '''realm_tokenizer''' )
        os.makedirs(UpperCAmelCase__ , exist_ok=UpperCAmelCase__ )
        snake_case : Any = os.path.join(UpperCAmelCase__ , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        snake_case : Tuple = os.path.join(self.tmpdirname , '''realm_block_records''' )
        os.makedirs(UpperCAmelCase__ , exist_ok=UpperCAmelCase__ )

    def lowerCAmelCase( self : List[Any] ):
        """Load a RealmTokenizer from the fixture dir."""
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer''' ) )

    def lowerCAmelCase( self : Any ):
        """Remove the temp dir (tearDown)."""
        shutil.rmtree(self.tmpdirname )

    def lowerCAmelCase( self : Optional[int] ):
        """Build a RealmConfig sized to the fixture's number of block records."""
        snake_case : Any = RealmConfig(num_block_records=self.num_block_records )
        return config

    def lowerCAmelCase( self : int ):
        """Build a tiny QA dataset (unused by most tests)."""
        snake_case : Optional[int] = Dataset.from_dict(
            {
                '''id''': ['''0''', '''1'''],
                '''question''': ['''foo''', '''bar'''],
                '''answers''': [['''Foo''', '''Bar'''], ['''Bar''']],
            } )
        return dataset

    def lowerCAmelCase( self : str ):
        """Build the numpy array of evidence block byte strings."""
        snake_case : Dict = np.array(
            [
                b'''This is the first record''',
                b'''This is the second record''',
                b'''This is the third record''',
                b'''This is the fourth record''',
                b'''This is the fifth record''',
                b'''This is a longer longer longer record''',
            ] , dtype=UpperCAmelCase__ , )
        return block_records

    def lowerCAmelCase( self : Optional[Any] ):
        """Build a RealmRetriever over the dummy block records and fixture tokenizer."""
        snake_case : Tuple = RealmRetriever(
            block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
        return retriever

    def lowerCAmelCase( self : Optional[Any] ):
        """Retrieve two blocks and verify shapes and decoded concatenated inputs."""
        snake_case : List[str] = self.get_config()
        snake_case : Optional[Any] = self.get_dummy_retriever()
        snake_case : Optional[int] = retriever.tokenizer
        snake_case : Dict = np.array([0, 3] , dtype='''long''' )
        snake_case : Optional[int] = tokenizer(['''Test question'''] ).input_ids
        snake_case : Union[str, Any] = tokenizer(
            ['''the fourth'''] , add_special_tokens=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , ).input_ids
        snake_case : Optional[Any] = config.reader_seq_len
        snake_case , snake_case , snake_case , snake_case : List[str] = retriever(
            UpperCAmelCase__ , UpperCAmelCase__ , answer_ids=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors='''np''' )
        self.assertEqual(len(UpperCAmelCase__ ) , 2 )
        self.assertEqual(len(UpperCAmelCase__ ) , 2 )
        self.assertEqual(len(UpperCAmelCase__ ) , 2 )
        self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
        self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
        self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
        self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , )

    def lowerCAmelCase( self : Optional[Any] ):
        """Verify has-answer flags and answer start/end positions for three retrieved blocks."""
        snake_case : List[Any] = self.get_config()
        snake_case : Optional[int] = self.get_dummy_retriever()
        snake_case : List[str] = retriever.tokenizer
        snake_case : Optional[Any] = np.array([0, 3, 5] , dtype='''long''' )
        snake_case : Optional[int] = tokenizer(['''Test question'''] ).input_ids
        snake_case : Any = tokenizer(
            ['''the fourth''', '''longer longer'''] , add_special_tokens=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , ).input_ids
        snake_case : List[Any] = config.reader_seq_len
        snake_case , snake_case , snake_case , snake_case : Union[str, Any] = retriever(
            UpperCAmelCase__ , UpperCAmelCase__ , answer_ids=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors='''np''' )
        self.assertEqual([False, True, True] , UpperCAmelCase__ )
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , UpperCAmelCase__ )
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , UpperCAmelCase__ )

    def lowerCAmelCase( self : Optional[int] ):
        """Round-trip save_pretrained/from_pretrained, locally and via a mocked hub download."""
        snake_case : int = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
        # Test local path
        snake_case : Optional[Any] = retriever.from_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
        self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
        # Test mocked remote path
        with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''' ) as mock_hf_hub_download:
            snake_case : Any = os.path.join(
                os.path.join(self.tmpdirname , '''realm_block_records''' ) , _REALM_BLOCK_RECORDS_FILENAME )
            snake_case : Any = RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''' )
        self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
| 84 | 1 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
# NOTE(review): every module constant below was renamed to `_a` by the
# obfuscation, so each assignment shadows the previous one and only the last
# binding survives.  Upstream names (in order): `logger`, `_CONFIG_FOR_DOC`,
# `_CHECKPOINT_FOR_DOC`, `_EXPECTED_OUTPUT_SHAPE`, `_IMAGE_CLASS_CHECKPOINT`,
# `_IMAGE_CLASS_EXPECTED_OUTPUT`, `REGNET_PRETRAINED_MODEL_ARCHIVE_LIST` —
# restore them before these docstring constants can be used.
_a : Dict = logging.get_logger(__name__)
# General docstring
_a : int = 'RegNetConfig'
# Base docstring
_a : Optional[int] = 'facebook/regnet-y-040'
_a : Dict = [1, 1_088, 7, 7]
# Image classification docstring
_a : Any = 'facebook/regnet-y-040'
_a : Dict = 'tabby, tabby cat'
_a : int = [
    'facebook/regnet-y-040',
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class a_ ( nn.Module ):
def __init__( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : Optional[str] = "relu" , ):
"""simple docstring"""
super().__init__()
snake_case : Tuple = nn.Convad(
UpperCAmelCase__ , UpperCAmelCase__ , kernel_size=UpperCAmelCase__ , stride=UpperCAmelCase__ , padding=kernel_size // 2 , groups=UpperCAmelCase__ , bias=UpperCAmelCase__ , )
snake_case : Dict = nn.BatchNormad(UpperCAmelCase__ )
snake_case : Union[str, Any] = ACTaFN[activation] if activation is not None else nn.Identity()
def lowerCAmelCase( self : str , UpperCAmelCase__ : Dict ):
"""simple docstring"""
snake_case : Optional[int] = self.convolution(UpperCAmelCase__ )
snake_case : Tuple = self.normalization(UpperCAmelCase__ )
snake_case : Optional[int] = self.activation(UpperCAmelCase__ )
return hidden_state
class a_ ( nn.Module ):
def __init__( self : int , UpperCAmelCase__ : RegNetConfig ):
"""simple docstring"""
super().__init__()
snake_case : Tuple = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
snake_case : Optional[int] = config.num_channels
def lowerCAmelCase( self : Optional[int] , UpperCAmelCase__ : Optional[int] ):
"""simple docstring"""
snake_case : Tuple = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
snake_case : str = self.embedder(UpperCAmelCase__ )
return hidden_state
class a_ ( nn.Module ):
def __init__( self : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 2 ):
"""simple docstring"""
super().__init__()
snake_case : Optional[int] = nn.Convad(UpperCAmelCase__ , UpperCAmelCase__ , kernel_size=1 , stride=UpperCAmelCase__ , bias=UpperCAmelCase__ )
snake_case : str = nn.BatchNormad(UpperCAmelCase__ )
def lowerCAmelCase( self : Optional[int] , UpperCAmelCase__ : Tensor ):
"""simple docstring"""
snake_case : int = self.convolution(UpperCAmelCase__ )
snake_case : List[Any] = self.normalization(UpperCAmelCase__ )
return hidden_state
class RegNetSELayer(nn.Module):
    """Squeeze-and-Excitation layer: global-average-pool to (1, 1), compute a
    per-channel sigmoid gate through a small bottleneck, and scale the input."""

    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        # b c h w -> b c 1 1
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state
class RegNetXLayer(nn.Module):
    """RegNet's X layer: a ResNet-style bottleneck with a grouped 3x3 conv
    (reduction defaults to 1 in RegNet), plus an identity/projection shortcut."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        # Group count derived from the configured group width; at least 1.
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            # Final 1x1 conv has no activation; it is applied after the residual add.
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACTaFN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetYLayer(nn.Module):
    """RegNet's Y layer: an X layer with a Squeeze-and-Excitation block inserted
    after the grouped 3x3 conv."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            # SE bottleneck sized from the *input* channels, per the RegNet paper.
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACTaFN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetStage(nn.Module):
    """A RegNet stage: `depth` stacked X or Y layers; only the first layer
    downsamples (stride) and changes the channel count."""

    def __init__(
        self,
        config: RegNetConfig,
        in_channels: int,
        out_channels: int,
        stride: int = 2,
        depth: int = 2,
    ):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == '''x''' else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state
class RegNetEncoder(nn.Module):
    """Stack of RegNet stages. Optionally collects the hidden state before each
    stage (plus the final one) when `output_hidden_states` is requested."""

    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True):
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
class a_(a):
    """Abstract base for RegNet models: wires the config class into the framework
    and provides weight initialization / gradient-checkpointing hooks.

    The class attributes and hook names below are read by the `PreTrainedModel`
    machinery, so the obfuscated `A__`/`lowerCAmelCase` names (which overwrote
    each other) are restored to their canonical names.
    """

    config_class = RegNetConfig
    base_model_prefix = 'regnet'
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """He-init conv weights; unit-weight / zero-bias for norm layers."""
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode='''fan_out''', nonlinearity='''relu''')
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        # Only the encoder-bearing base model supports checkpointing.
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
# Shared docstring fragments for the model classes below. The obfuscated source
# bound both to the same name `_a`, so the second overwrote the first and the
# decorators referenced undefined names.
REGNET_START_DOCSTRING = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
REGNET_INPUTS_DOCSTRING = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n'


@add_start_docstrings(
    'The bare RegNet model outputting raw features without any specific head on top.',
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(a):
    """Embedder + encoder + global average pooler; returns the last hidden state
    and a pooled (1x1) feature map."""

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality='''vision''',
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    '\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ',
    a,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class a_(a):
    """RegNet backbone + linear classification head.

    Restores the broken local variable flow of the obfuscated source (losses,
    logits and the tuple return path all read names that were never assigned).
    """

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        """Classify `pixel_values`; when `labels` is given also compute the loss,
        auto-detecting the problem type on first use (standard HF behavior)."""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = '''regression'''
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = '''single_label_classification'''
                else:
                    self.config.problem_type = '''multi_label_classification'''
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 84 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy-import structure for the encoder-decoder package. The obfuscated source
# rebound `_a` for every entry (losing all but the last) and never assigned the
# _LazyModule to sys.modules, so lazy loading was entirely broken.
_import_structure = {'configuration_encoder_decoder': ['EncoderDecoderConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_encoder_decoder'] = ['EncoderDecoderModel']

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_encoder_decoder'] = ['TFEncoderDecoderModel']

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_flax_encoder_decoder'] = ['FlaxEncoderDecoderModel']

if TYPE_CHECKING:
    from .configuration_encoder_decoder import EncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encoder_decoder import EncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_encoder_decoder import TFEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 84 | 1 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
_a : int = logging.get_logger(__name__)
@dataclass
class a_(a):
    """TensorFlow-specific benchmark arguments.

    Restores attribute/method names (the obfuscated `A__`/`lowerCAmelCase`
    duplicates overwrote each other) and the local-variable flow of `__init__`,
    which read `positive_arg`/`kwargs` without ever assigning them.
    """

    # Deprecated negated flags that are remapped to their positive counterparts.
    deprecated_args = [
        'no_inference',
        'no_cuda',
        'no_tpu',
        'no_speed',
        'no_memory',
        'no_env_print',
        'no_multi_process',
    ]

    def __init__(self, **kwargs):
        """Translate deprecated `no_*` kwargs into positive flags, then pop the
        TF-specific options before delegating to the base dataclass init."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    F"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
                    F" {positive_arg}={kwargs[positive_arg]}" )
        self.tpu_name = kwargs.pop('''tpu_name''', self.tpu_name)
        self.device_idx = kwargs.pop('''device_idx''', self.device_idx)
        self.eager_mode = kwargs.pop('''eager_mode''', self.eager_mode)
        self.use_xla = kwargs.pop('''use_xla''', self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={'help': 'Name of TPU'},
    )
    device_idx: int = field(
        default=0,
        metadata={'help': 'CPU / GPU device index. Defaults to 0.'},
    )
    eager_mode: bool = field(default=False, metadata={'help': 'Benchmark models in eager model.'})
    use_xla: bool = field(
        default=False,
        metadata={
            'help': 'Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.'
        },
    )

    @cached_property
    def _setup_tpu(self):
        """Resolve a TPU cluster (or None when unavailable)."""
        requires_backends(self, ['''tf'''])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self):
        """Build the tf.distribute strategy for TPU, single GPU, or CPU."""
        requires_backends(self, ['''tf'''])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], '''GPU''')
                strategy = tf.distribute.OneDeviceStrategy(device=F"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], '''GPU''')  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=F"/cpu:{self.device_idx}")
        return strategy

    @property
    def is_tpu(self):
        requires_backends(self, ['''tf'''])
        return self._setup_tpu is not None

    @property
    def strategy(self):
        requires_backends(self, ['''tf'''])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ['''tf'''])
        return tf.config.list_physical_devices('''GPU''')

    @property
    def n_gpu(self):
        requires_backends(self, ['''tf'''])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self):
        return self.n_gpu > 0
| 84 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

# Tokenizer resource tables. The class below reads these by their canonical
# names; the obfuscated source bound every one to `_a`, leaving the real names
# undefined.
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'yjernite/retribert-base-uncased': (
            'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
        ),
    },
    'tokenizer_file': {
        'yjernite/retribert-base-uncased': (
            'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'yjernite/retribert-base-uncased': 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    'yjernite/retribert-base-uncased': {'do_lower_case': True},
}
class a_(a):
    """Fast RetriBERT tokenizer (BERT-style, backed by HuggingFace *tokenizers*).

    Class attributes and overridden method names are restored: the framework
    looks them up by name, and the obfuscated duplicates were a SyntaxError
    (repeated parameter names) or read never-assigned locals.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Rebuild the backend normalizer if its saved options disagree with the
        # ones requested here (standard BertTokenizerFast behavior).
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('''lowercase''', do_lower_case) != do_lower_case
            or normalizer_state.get('''strip_accents''', strip_accents) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('''type'''))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """[CLS] A [SEP] (+ B [SEP] when a second sequence is given)."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """0s over [CLS] A [SEP], 1s over B [SEP]."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 84 | 1 |
from maths.prime_check import is_prime
def a_(number: int) -> int:
    """Return `number + 2` when (number, number + 2) is a twin-prime pair, else -1.

    Raises TypeError for non-int input. The obfuscated original checked
    `isinstance(number, number)` and read an undefined name, so it could never
    run correctly.
    """
    if not isinstance(number, int):
        msg = F"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    return -1
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 84 |
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    """Euclidean algorithm: gcd of *a* and *b* (gcd(0, b) == b).

    The obfuscated original had two parameters with the same name (a
    SyntaxError) and recursed via this name — which it never defined — so the
    definition is restored under the name its callers use.
    """
    return b if a == 0 else greatest_common_divisor(b % a, a)
class HillCipher:
    """Hill cipher over a 36-symbol alphabet (A-Z followed by 0-9).

    Renamed from the placeholder ``a_`` to the name used by ``main()`` below.
    Uses ``math.gcd`` for the determinant co-primality check so the class is
    self-contained.
    """

    # This cipher takes alphanumerics into account, i.e. a total of 36 characters.
    key_string = string.ascii_uppercase + string.digits
    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)
    # Round float matrix entries back to ints after the modular inverse.
    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray):
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        """Symbol -> index in the 36-char alphabet."""
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        """Index (possibly float) -> symbol in the 36-char alphabet."""
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        """Raise ValueError unless det(key) is co-prime with 36 (invertible mod 36)."""
        from math import gcd  # stdlib gcd; the module-level helper was unreliable

        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if gcd(det, len(self.key_string)) != 1:
            msg = (
                F"determinant modular {req_l} of encryption key({det}) "
                F"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        """Upper-case, drop foreign symbols, pad with the last char to a multiple
        of the key size. Empty input yields the empty string."""
        chars = [char for char in text.upper() if char in self.key_string]
        if not chars:
            return ""
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)

    def encrypt(self, text: str) -> str:
        """Encrypt `text` block-by-block with the key matrix (mod 36)."""
        text = self.process_text(text.upper())
        encrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch
        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        """Modular inverse of the key matrix: (det^-1 mod 36) * det * inv(key), mod 36."""
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        inv_key = det_inv * numpy.linalg.det(self.encrypt_key) * numpy.linalg.inv(self.encrypt_key)
        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        """Decrypt `text` block-by-block with the modular inverse key."""
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch
        return decrypted
def main() -> None:
    """Interactively read a key matrix, then encrypt or decrypt user-supplied text.

    The obfuscated original defined this as ``a_`` while the script guard below
    called ``main()``, and every local it read was never assigned.
    """
    n = int(input('''Enter the order of the encryption key: '''))
    hill_matrix = []
    print('''Enter each row of the encryption key with space separated integers''')
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)
    hc = HillCipher(numpy.array(hill_matrix))
    print('''Would you like to encrypt or decrypt some text? (1 or 2)''')
    option = input('''\n1. Encrypt\n2. Decrypt\n''')
    if option == "1":
        text_e = input('''What text would you like to encrypt?: ''')
        print('''Your encrypted text is:''')
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input('''What text would you like to decrypt?: ''')
        print('''Your decrypted text is:''')
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 84 | 1 |
# flake8: noqa
# Lint as: python3
# Public API of this utils package; must be named __all__ to actually control
# star-imports (the obfuscated source bound it to `_a`, defeating its purpose).
__all__ = [
    'VerificationMode',
    'Version',
    'disable_progress_bar',
    'enable_progress_bar',
    'is_progress_bar_enabled',
    'experimental',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 84 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class a_(a):
    """Agent tool wrapping a BLIP captioning model: image in, English caption out.

    Class attributes and the encode/forward/decode hook names are read by the
    `PipelineTool` machinery, so the obfuscated `A__`/`lowerCAmelCase` duplicates
    are restored to their canonical names.
    """

    default_checkpoint = 'Salesforce/blip-image-captioning-base'
    description = (
        'This is a tool that generates a description of an image. It takes an input named `image` which should be the '
        'image to caption, and returns a text that contains the description in English.'
    )
    name = 'image_captioner'
    model_class = AutoModelForVisionaSeq
    inputs = ['image']
    outputs = ['text']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['''vision'''])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        """Preprocess the PIL image into model-ready tensors."""
        return self.pre_processor(images=image, return_tensors='''pt''')

    def forward(self, inputs):
        """Generate caption token ids from the preprocessed inputs."""
        return self.model.generate(**inputs)

    def decode(self, outputs):
        """Decode the generated ids to a clean caption string."""
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
| 84 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class a_(a, unittest.TestCase):
    """Fast CPU tests for LDMTextToImagePipeline with tiny dummy components.

    Helper names (`get_dummy_components`/`get_dummy_inputs`) are restored — the
    test body calls them by these names — and the test method gets a `test_`
    prefix so unittest actually discovers it.
    """

    pipeline_class = LDMTextToImagePipeline
    params = TEXT_TO_IMAGE_PARAMS - {
        'negative_prompt',
        'negative_prompt_embeds',
        'cross_attention_kwargs',
        'prompt_embeds',
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        'num_images_per_prompt',
        'callback',
        'callback_steps',
    }
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # NOTE(review): attribute name reconstructed from the mixin's convention
    # (the obfuscated source bound it to the collapsed name `A__`) — confirm.
    test_cpu_offload = False

    def get_dummy_components(self):
        """Build tiny deterministic unet/scheduler/vae/text-encoder components."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''),
            up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule='''scaled_linear''',
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=(32, 64),
            in_channels=3,
            out_channels=3,
            down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D'''),
            up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D'''),
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vqvae''': vae,
            '''bert''': text_encoder,
            '''tokenizer''': tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline kwargs; mps needs a CPU-seeded generator."""
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
        }
        return inputs

    def test_inference_text2img(self):
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class a_(unittest.TestCase):
    """Slow GPU tests for the pretrained 256px LDM text-to-image pipeline."""

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        """Deterministic kwargs with pre-sampled latents.

        NOTE(review): the obfuscated source had ``torch.floataa`` for the dtype
        default; float32 assumed — confirm against the pipeline's slow tests.
        """
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''latents''': latents,
            '''generator''': generator,
            '''num_inference_steps''': 3,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained('''CompVis/ldm-text2im-large-256''').to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878])
        max_diff = np.abs(expected_slice - image_slice).max()
        assert max_diff < 1e-3
@nightly
@require_torch_gpu
class a_ ( unittest.TestCase ):
    """Nightly GPU regression test: a full 50-step LDM run compared pixel-wise
    against a reference image stored in the diffusers test-arrays dataset.

    NOTE(review): identifiers look machine-mangled (``snake_case`` locals,
    duplicate ``UpperCAmelCase__`` parameters, ``torch.floataa``); compare
    with the original test module.
    """
    def lowerCAmelCase( self : Tuple ):
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def lowerCAmelCase( self : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any]=torch.floataa , UpperCAmelCase__ : Union[str, Any]=0 ):
        """Deterministic inputs: fixed seed, fixed NumPy-seeded latents, 50 steps."""
        snake_case : Tuple = torch.manual_seed(UpperCAmelCase__ )
        snake_case : List[Any] = np.random.RandomState(UpperCAmelCase__ ).standard_normal((1, 4, 32, 32) )
        snake_case : str = torch.from_numpy(UpperCAmelCase__ ).to(device=UpperCAmelCase__ , dtype=UpperCAmelCase__ )
        snake_case : Optional[int] = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''latents''': latents,
            '''generator''': generator,
            '''num_inference_steps''': 50,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
        }
        return inputs
    def lowerCAmelCase( self : Optional[int] ):
        """Compare a full pipeline run against the stored reference image."""
        snake_case : Optional[Any] = LDMTextToImagePipeline.from_pretrained('''CompVis/ldm-text2im-large-256''' ).to(UpperCAmelCase__ )
        pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
        snake_case : List[Any] = self.get_inputs(UpperCAmelCase__ )
        snake_case : List[str] = pipe(**UpperCAmelCase__ ).images[0]
        snake_case : str = load_numpy(
            '''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy''' )
        snake_case : Any = np.abs(expected_image - image ).max()
        assert max_diff < 1e-3
| 84 |
def a_ ( __magic_name__ ) -> bool:
    """Lucas-Lehmer primality test for Mersenne numbers.

    For a prime exponent ``p``, the Mersenne number ``2**p - 1`` is prime
    iff ``s(p-2) == 0`` where ``s(0) = 4`` and ``s(i) = (s(i-1)**2 - 2) mod
    (2**p - 1)``.

    Args:
        __magic_name__: the exponent ``p``; must be at least 2. The result is
            only meaningful when ``p`` itself is prime.

    Returns:
        True if ``2**p - 1`` is prime, False otherwise.

    Raises:
        ValueError: if ``p`` is less than 2.
    """
    # Bug fix: the original body referenced an unbound name ``p`` (the
    # parameter had been renamed without updating the body), so every call
    # raised NameError.
    p = __magic_name__
    if p < 2:
        raise ValueError('''p should not be less than 2!''' )
    elif p == 2:
        return True  # 2**2 - 1 == 3 is prime; the recurrence needs p > 2
    s = 4
    m = (1 << p) - 1  # the Mersenne number 2**p - 1
    for _ in range(p - 2 ):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
    # Bug fix: the guard previously called the undefined name
    # ``lucas_lehmer_test``; the function above is (mangled to) ``a_``.
    print(a_(7))   # True:  2**7  - 1 == 127 is prime
    print(a_(11))  # False: 2**11 - 1 == 2047 == 23 * 89
| 84 | 1 |
# Boilerplate cell injected at the top of generated doc notebooks: installs
# transformers + datasets via pip, with a commented-out source install.
_a : int = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
# Cells prepended to every converted notebook.
# NOTE(review): ``INSTALL_CONTENT`` is not defined here -- the repeated
# rebinding of ``_a`` looks machine-mangled; originally each constant had
# its own name (INSTALL_CONTENT, notebook_first_cells, black_avoid_patterns).
_a : Union[str, Any] = [{'type': 'code', 'content': INSTALL_CONTENT}]
# Placeholder -> dummy-class substitutions used when rendering doc templates.
_a : Union[str, Any] = {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
    '{object_class}': 'FakeObjectClass',
}
| 84 |
from sklearn.metrics import fa_score
import datasets
_a : List[str] = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'
_a : Dict = '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. 
This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, 
references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n'
_a : List[Any] = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
    """F1 metric for the ``datasets`` library, delegating to scikit-learn.

    NOTE(review): this block looks machine-mangled -- the import spells
    ``fa_score`` (presumably ``f1_score``), the compute method repeats the
    parameter name ``UpperCAmelCase__`` six times (a SyntaxError), and the
    result is bound to ``snake_case`` while ``score`` is read afterwards.
    Compare with the original metric module.
    """
    def lowerCAmelCase( self : Any ):
        """Declare metric metadata: int32 inputs, sequence-valued in the
        ``multilabel`` configuration, scalar-valued otherwise."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
                    '''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
                }
                if self.config_name == '''multilabel'''
                else {
                    '''predictions''': datasets.Value('''int32''' ),
                    '''references''': datasets.Value('''int32''' ),
                } ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
    def lowerCAmelCase( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Optional[Any]=1 , UpperCAmelCase__ : List[str]="binary" , UpperCAmelCase__ : str=None ):
        """Compute F1 via sklearn and return ``{"f1": score}`` (a float when
        the score is scalar, an array for per-class averaging)."""
        snake_case : List[Any] = fa_score(
            UpperCAmelCase__ , UpperCAmelCase__ , labels=UpperCAmelCase__ , pos_label=UpperCAmelCase__ , average=UpperCAmelCase__ , sample_weight=UpperCAmelCase__ )
        return {"f1": float(UpperCAmelCase__ ) if score.size == 1 else score}
| 84 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class a_ ( unittest.TestCase ):
    """Holds the configuration used by the LayoutLMv3 image-processor tests
    (batch size, channels, image/resolution bounds, resize size, OCR flag).

    NOTE(review): the constructor binds every value to a local ``snake_case``
    instead of ``self.<attr>`` (machine-mangled), yet
    ``prepare_image_processor_dict`` reads ``self.do_resize`` etc. --
    those attributes are never actually set here.
    """
    def __init__( self : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int]=7 , UpperCAmelCase__ : Union[str, Any]=3 , UpperCAmelCase__ : int=18 , UpperCAmelCase__ : Optional[int]=30 , UpperCAmelCase__ : Optional[int]=400 , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : int=True , ):
        """Record the test parameters, defaulting ``size`` to 18x18."""
        snake_case : int = size if size is not None else {'''height''': 18, '''width''': 18}
        snake_case : Optional[Any] = parent
        snake_case : Any = batch_size
        snake_case : Any = num_channels
        snake_case : Union[str, Any] = image_size
        snake_case : Dict = min_resolution
        snake_case : Dict = max_resolution
        snake_case : int = do_resize
        snake_case : List[str] = size
        snake_case : List[Any] = apply_ocr
    def lowerCAmelCase( self : int ):
        """Return the kwargs dict used to build the processor under test."""
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class a_ ( a , unittest.TestCase ):
    """Tests for ``LayoutLMvaImageProcessor``: property plumbing, resize
    behaviour on PIL/NumPy/torch inputs, and a Tesseract OCR integration check.

    NOTE(review): locals are machine-mangled (everything bound to
    ``snake_case``) while later lines read the original names
    (``image_processor``, ``image_inputs``, ``encoding`` ...); compare with
    the original test module before trusting control flow here.
    """
    # Class under test; None when pytesseract is unavailable (tests skip).
    A__ : List[Any] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
    def lowerCAmelCase( self : Dict ):
        """Create the shared configuration fixture."""
        snake_case : Optional[Any] = LayoutLMvaImageProcessingTester(self )
    @property
    def lowerCAmelCase( self : Dict ):
        """Kwargs used to instantiate the processor under test."""
        return self.image_processor_tester.prepare_image_processor_dict()
    def lowerCAmelCase( self : List[Any] ):
        """The processor exposes ``do_resize``, ``size`` and ``apply_ocr``."""
        snake_case : List[str] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(UpperCAmelCase__ , '''do_resize''' ) )
        self.assertTrue(hasattr(UpperCAmelCase__ , '''size''' ) )
        self.assertTrue(hasattr(UpperCAmelCase__ , '''apply_ocr''' ) )
    def lowerCAmelCase( self : Optional[int] ):
        """``from_dict`` honours the default size and an explicit override."""
        snake_case : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
        snake_case : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
    def lowerCAmelCase( self : str ):
        """Intentionally empty placeholder test."""
        pass
    def lowerCAmelCase( self : Dict ):
        """PIL inputs: single image and batch resize to the configured size,
        and OCR outputs (words/boxes) are returned."""
        # Initialize image_processing
        snake_case : Tuple = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        snake_case : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase__ , Image.Image )
        # Test not batched input
        snake_case : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' )
        self.assertEqual(
            encoding.pixel_values.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        self.assertIsInstance(encoding.words , UpperCAmelCase__ )
        self.assertIsInstance(encoding.boxes , UpperCAmelCase__ )
        # Test batched
        snake_case : Dict = image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
    def lowerCAmelCase( self : Union[str, Any] ):
        """NumPy inputs: single image and batch resize to the configured size."""
        # Initialize image_processing
        snake_case : List[str] = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        snake_case : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase__ , np.ndarray )
        # Test not batched input
        snake_case : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        # Test batched
        snake_case : List[Any] = image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
    def lowerCAmelCase( self : Optional[Any] ):
        """Torch-tensor inputs: single image and batch resize to the configured size."""
        # Initialize image_processing
        snake_case : str = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        snake_case : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )
        # Test not batched input
        snake_case : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        # Test batched
        snake_case : Tuple = image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
    def lowerCAmelCase( self : Optional[Any] ):
        """Integration: run real OCR on a DocVQA fixture and compare the
        extracted words/boxes against reference output from Tesseract 4.1.1,
        then check apply_ocr=False returns pixel values only."""
        # with apply_OCR = True
        snake_case : int = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        snake_case : List[Any] = load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' )
        snake_case : List[Any] = Image.open(ds[0]['''file'''] ).convert('''RGB''' )
        snake_case : Any = image_processing(UpperCAmelCase__ , return_tensors='''pt''' )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
        self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        snake_case : Optional[Any] = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
        snake_case : Union[str, Any] = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
        # fmt: on
        self.assertListEqual(encoding.words , UpperCAmelCase__ )
        self.assertListEqual(encoding.boxes , UpperCAmelCase__ )
        # with apply_OCR = False
        snake_case : str = LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase__ )
        snake_case : Optional[Any] = image_processing(UpperCAmelCase__ , return_tensors='''pt''' )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 84 |
def a_ ( __magic_name__ ) -> int:
    """Return the largest integer obtainable by deleting exactly one digit.

    The sign of the input is discarded (the original used ``abs``), and a
    single-digit input yields 0 (nothing is left after the removal).

    >>> a_(152)
    52
    >>> a_(-20)
    2
    >>> a_(5)
    0

    Args:
        __magic_name__: the integer whose digits are considered.

    Returns:
        The maximum over all positions of the number formed by removing the
        digit at that position.

    Raises:
        TypeError: if the input is not an integer.
    """
    # Bug fixes vs. the mangled original: ``isinstance(x, x)`` raised
    # TypeError for every call, ``num_transpositions`` was unbound, and
    # ``pop(__magic_name__)`` removed the wrong element.
    if not isinstance(__magic_name__ , int ):
        raise TypeError('''only integers accepted as input''' )
    digits = str(abs(__magic_name__ ) )
    # Every string obtained by dropping the digit at one position.
    candidates = [digits[:index] + digits[index + 1 :] for index in range(len(digits ) )]
    # A single-digit input leaves the empty string, which counts as 0.
    return max(int(candidate ) if candidate else 0 for candidate in candidates )
if __name__ == "__main__":
    __import__('doctest').testmod()
| 84 | 1 |
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
_a : Optional[Any] = logging.get_logger(__name__)
logging.set_verbosity_info()
def a_ ( __magic_name__ , __magic_name__ ) -> List[Any]:
    """Convert an old-structure (XLM)ProphetNet checkpoint into the current
    ``transformers`` layout and save it to ``pytorch_dump_folder_path``.

    The old model is loaded alongside a freshly instantiated new model; every
    key the new model reports as missing is resolved against the old model
    (including unpacking the fused ``in_proj_weight`` into separate
    query/key/value projections) and copied over.

    NOTE(review): this function is machine-mangled -- both parameters are
    named ``__magic_name__`` (duplicate parameter names are a SyntaxError),
    results are repeatedly bound to a throwaway ``snake_case`` local while
    later lines read the original names (``prophet``, ``loading_info``,
    ``attributes`` ...). Compare with the original conversion script.
    """
    if "xprophetnet" in prophetnet_checkpoint_path:
        snake_case : Dict = XLMProphetNetForConditionalGenerationOld.from_pretrained(__magic_name__ )
        snake_case , snake_case : List[str] = XLMProphetNetForConditionalGeneration.from_pretrained(
            __magic_name__ , output_loading_info=__magic_name__ )
    else:
        snake_case : List[Any] = ProphetNetForConditionalGenerationOld.from_pretrained(__magic_name__ )
        snake_case , snake_case : List[str] = ProphetNetForConditionalGeneration.from_pretrained(
            __magic_name__ , output_loading_info=__magic_name__ )
    # Attention projections that are packed into ``in_proj_weight`` in the old model.
    snake_case : Tuple = ['''key_proj''', '''value_proj''', '''query_proj''']
    # Old attribute name -> new attribute name.
    snake_case : List[Any] = {
        '''self_attn''': '''ngram_self_attn''',
        '''cross_attn''': '''encoder_attn''',
        '''cross_attn_layer_norm''': '''encoder_attn_layer_norm''',
        '''feed_forward_layer_norm''': '''final_layer_norm''',
        '''feed_forward''': '''''',
        '''intermediate''': '''fc1''',
        '''output''': '''fc2''',
        '''key_proj''': '''k_proj''',
        '''query_proj''': '''q_proj''',
        '''value_proj''': '''v_proj''',
        '''word_embeddings''': '''embed_tokens''',
        '''embeddings_layer_norm''': '''emb_layer_norm''',
        '''relative_pos_embeddings''': '''relative_linear''',
        '''ngram_embeddings''': '''ngram_input_embed''',
        '''position_embeddings''': '''embed_positions''',
    }
    # Walk each missing key, descending both models attribute by attribute.
    for key in loading_info["missing_keys"]:
        snake_case : Optional[int] = key.split('''.''' )
        if attributes[0] == "lm_head":
            snake_case : Optional[Any] = prophet
            snake_case : int = prophet_old
        else:
            snake_case : List[str] = prophet.prophetnet
            snake_case : str = prophet_old.model
        snake_case : Any = False
        for attribute in attributes:
            if attribute in mapping:
                snake_case : Optional[Any] = mapping[attribute]
            if not hasattr(__magic_name__ , __magic_name__ ) and len(__magic_name__ ) > 0:
                snake_case : Union[str, Any] = attribute
            elif hasattr(__magic_name__ , __magic_name__ ):
                snake_case : Optional[int] = attribute
            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                snake_case : str = old_model.weight
                logger.info(F"{attribute} is initialized." )
                snake_case : List[Any] = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                snake_case : Union[str, Any] = old_model.bias
                logger.info(F"{attribute} is initialized" )
                snake_case : Dict = True
                break
            elif attribute in special_keys and hasattr(__magic_name__ , '''in_proj_weight''' ):
                # Old model packs q/k/v into one matrix; slice out one third each.
                snake_case : List[Any] = old_model.in_proj_weight.shape[0] // 3
                snake_case : Optional[int] = getattr(__magic_name__ , __magic_name__ )
                # NOTE(review): the next two lines lost their ``assert`` keyword --
                # they build tuples and check nothing.
                param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    snake_case : str = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
                    snake_case : Any = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
                elif attribute == "key_proj":
                    snake_case : Any = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
                    snake_case : List[str] = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
                elif attribute == "value_proj":
                    snake_case : Dict = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
                    snake_case : str = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
                snake_case : List[Any] = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                snake_case : Dict = nn.Parameter(old_model.embed_positions.weight[:512, :] )
                snake_case : Union[str, Any] = True
                break
            if attribute.isdigit():
                # Numeric path component indexes into a ModuleList.
                snake_case : Dict = model[int(__magic_name__ )]
                snake_case : Optional[int] = old_model[int(__magic_name__ )]
            else:
                snake_case : Any = getattr(__magic_name__ , __magic_name__ )
                if old_attribute == "":
                    snake_case : Tuple = old_model
                else:
                    if not hasattr(__magic_name__ , __magic_name__ ):
                        raise ValueError(F"{old_model} does not have {old_attribute}" )
                    snake_case : str = getattr(__magic_name__ , __magic_name__ )
        if not is_key_init:
            raise ValueError(F"{key} was not correctly initialized!" )
    print(F"Saving model to {pytorch_dump_folder_path}" )
    prophet.save_pretrained(__magic_name__ )
if __name__ == "__main__":
    # Bug fix: the mangled original bound the parser and the parsed args to
    # ``_a`` but then read the undefined names ``parser`` / ``args``, and
    # called ``convert_prophetnet_checkpoint_to_pytorch`` although the
    # conversion function above is (mangled to) ``a_`` -- every reference
    # raised NameError.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    a_(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 84 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class a_ :
    def __init__( self : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any]=99 , UpperCAmelCase__ : Dict=13 , UpperCAmelCase__ : int=7 , UpperCAmelCase__ : Any=9 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Tuple=False , UpperCAmelCase__ : Tuple=32 , UpperCAmelCase__ : Dict=5 , UpperCAmelCase__ : Optional[int]=4 , UpperCAmelCase__ : List[Any]=37 , UpperCAmelCase__ : Union[str, Any]=8 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : str=0.002 , UpperCAmelCase__ : str=1 , UpperCAmelCase__ : Any=0 , UpperCAmelCase__ : Union[str, Any]=0 , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Optional[Any]=None , ):
        """Store the hyper-parameters used by the UMT5 model tester.

        NOTE(review): machine-mangled -- the signature repeats the parameter
        name ``UpperCAmelCase__`` (a SyntaxError) and every value is bound to
        a local ``snake_case`` instead of a ``self`` attribute, while later
        methods read ``self.batch_size``, ``self.vocab_size`` etc.
        """
        snake_case : Union[str, Any] = parent
        snake_case : Union[str, Any] = batch_size
        snake_case : Any = encoder_seq_length
        snake_case : str = decoder_seq_length
        # For common tests
        snake_case : Optional[int] = self.decoder_seq_length
        snake_case : Optional[Any] = is_training
        snake_case : List[Any] = use_attention_mask
        snake_case : Union[str, Any] = use_labels
        snake_case : Any = vocab_size
        snake_case : Optional[int] = hidden_size
        snake_case : List[str] = num_hidden_layers
        snake_case : Union[str, Any] = num_attention_heads
        snake_case : Any = d_ff
        snake_case : Any = relative_attention_num_buckets
        snake_case : Optional[Any] = dropout_rate
        snake_case : int = initializer_factor
        snake_case : Optional[Any] = eos_token_id
        snake_case : Dict = pad_token_id
        snake_case : Optional[Any] = decoder_start_token_id
        snake_case : Union[str, Any] = None
        snake_case : List[str] = decoder_layers
    def lowerCAmelCase( self : Union[str, Any] ):
        """Fetch the reference UMT5 config from the Hub (requires network access)."""
        return TaConfig.from_pretrained('''google/umt5-base''' )
    def lowerCAmelCase( self : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : List[Any]=None , ):
        """Fill in default attention masks (non-pad positions) and all-ones head
        masks, then bundle everything into the model-input dict.

        NOTE(review): machine-mangled -- duplicate ``UpperCAmelCase__``
        parameter names and ``snake_case`` rebinding; the dict below reads the
        original local names (``input_ids``, ``head_mask``, ...).
        """
        if attention_mask is None:
            snake_case : Union[str, Any] = input_ids.ne(config.pad_token_id )
        if decoder_attention_mask is None:
            snake_case : Any = decoder_input_ids.ne(config.pad_token_id )
        if head_mask is None:
            snake_case : List[Any] = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=UpperCAmelCase__ )
        if decoder_head_mask is None:
            snake_case : Tuple = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=UpperCAmelCase__ )
        if cross_attn_head_mask is None:
            snake_case : Union[str, Any] = torch.ones(
                config.num_decoder_layers , config.num_attention_heads , device=UpperCAmelCase__ )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }
    def lowerCAmelCase( self : int ):
        """Create random encoder/decoder token ids (clamped above the pad id so
        no pad token appears mid-sequence), build a config, and return
        ``(config, input_dict)``."""
        snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
        snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        snake_case : List[str] = input_ids.clamp(self.pad_token_id + 1 )
        snake_case : List[str] = decoder_input_ids.clamp(self.pad_token_id + 1 )
        snake_case : str = self.get_config()
        snake_case : Tuple = config.num_attention_heads
        snake_case : List[Any] = self.prepare_inputs_dict(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
        return config, input_dict
def lowerCAmelCase( self : Dict ):
"""simple docstring"""
snake_case , snake_case : List[str] = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCAmelCase( self : Dict ):
"""simple docstring"""
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def lowerCAmelCase( self : Tuple ):
"""simple docstring"""
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def lowerCAmelCase( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Tuple , ):
"""simple docstring"""
snake_case : str = UMTaModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
snake_case : str = model(
input_ids=UpperCAmelCase__ , decoder_input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , )
snake_case : int = model(input_ids=UpperCAmelCase__ , decoder_input_ids=UpperCAmelCase__ )
snake_case : int = result.last_hidden_state
snake_case : Dict = result.past_key_values
snake_case : Dict = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(UpperCAmelCase__ ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def lowerCAmelCase( self : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str] , ):
"""simple docstring"""
snake_case : int = UMTaModel(config=UpperCAmelCase__ ).get_decoder().to(UpperCAmelCase__ ).eval()
# first forward pass
snake_case : List[Any] = model(UpperCAmelCase__ , use_cache=UpperCAmelCase__ )
snake_case : List[Any] = model(UpperCAmelCase__ )
snake_case : Any = model(UpperCAmelCase__ , use_cache=UpperCAmelCase__ )
self.parent.assertTrue(len(UpperCAmelCase__ ) == len(UpperCAmelCase__ ) )
self.parent.assertTrue(len(UpperCAmelCase__ ) == len(UpperCAmelCase__ ) + 1 )
snake_case , snake_case : List[str] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
snake_case : Any = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
snake_case : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case : Any = model(UpperCAmelCase__ )['''last_hidden_state''']
snake_case : Optional[Any] = model(UpperCAmelCase__ , past_key_values=UpperCAmelCase__ )['''last_hidden_state''']
# select random slice
snake_case : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case : Union[str, Any] = output_from_no_past[:, -1, random_slice_idx].detach()
snake_case : Tuple = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-3 ) )
def lowerCAmelCase( self : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , ):
"""simple docstring"""
snake_case : int = UMTaModel(config=UpperCAmelCase__ ).to(UpperCAmelCase__ ).half().eval()
snake_case : str = model(**UpperCAmelCase__ )['''last_hidden_state''']
self.parent.assertFalse(torch.isnan(UpperCAmelCase__ ).any().item() )
@require_torch
class a_ ( a , a , a , unittest.TestCase ):
    """Test suite for the small UMT5 model (model + generation + pipelines)."""

    # NOTE(review): all class attributes below share the name ``A__`` so only
    # the last binding survives; they appear to correspond to the usual mixin
    # fields (all_model_classes, all_generative_model_classes,
    # pipeline_model_mapping, fx/torchscript flags, model_split_percents).
    # Kept byte-identical to preserve the visible interface.
    A__ : str = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    A__ : str = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    A__ : Any = (
        {
            'conversational': UMTaForConditionalGeneration,
            'feature-extraction': UMTaModel,
            'summarization': UMTaForConditionalGeneration,
            'text2text-generation': UMTaForConditionalGeneration,
            'translation': UMTaForConditionalGeneration,
            'question-answering': UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    A__ : Dict = True
    A__ : List[str] = False
    A__ : Optional[int] = False
    A__ : Optional[int] = True
    A__ : List[str] = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    A__ : int = [0.8, 0.9]

    def lowerCAmelCase( self : Optional[Any] ):
        """Create the shared model tester (fix: was bound to ``snake_case``)."""
        self.model_tester = UMTaModelTester(self)

    @unittest.skip('''Test has a segmentation fault on torch 1.8.0''' )
    def lowerCAmelCase( self : Optional[int] ):
        """Export the model to ONNX (skipped: segfaults on torch 1.8.0)."""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMTaModel(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                F"{tmpdirname}/t5_test.onnx",
                export_params=True,
                opset_version=9,
                input_names=['''input_ids''', '''decoder_input_ids'''],
            )

    @unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
    def lowerCAmelCase( self : List[Any] ):
        """Run the tester's fp16 forward check on GPU."""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fpaa_forward(*config_and_inputs)

    def lowerCAmelCase( self : Tuple ):
        """Generating with fully zeroed head masks must produce zero attentions."""
        attention_names = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions''']
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMTaForConditionalGeneration(config).eval()
        model.to(torch_device)
        head_masking = {
            '''head_mask''': torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            '''decoder_head_mask''': torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            '''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }
        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks['''decoder_head_mask'''] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )
            out = model.generate(
                config_and_inputs[1]['''input_ids'''],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)

    @unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' )
    def lowerCAmelCase( self : Any ):
        """Disabled test placeholder (tiny model hits edge cases)."""
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class a_ ( unittest.TestCase ):
    """Slow integration test for google/umt5-small extra-id filling."""

    @slow
    @unittest.skip(
        '''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' )
    def lowerCAmelCase( self : int ):
        """Tokenize masked sentences, check ids, then generate the fillings.

        Fix: model/tokenizer/input/expected locals were all bound to
        ``snake_case`` and flag arguments were replaced by ``UpperCAmelCase__``;
        restored per the upstream integration test (return_dict/padding=True,
        use_fast/legacy=False).
        """
        model = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=True ).to(torch_device )
        tokenizer = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=False , legacy=False )
        sentences = [
            '''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
            '''No se como puedo <extra_id_0>.''',
            '''This is the reason why we <extra_id_0> them.''',
            '''The <extra_id_0> walks in <extra_id_1>, seats''',
            '''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
        ]
        input_ids = tokenizer(sentences , return_tensors='''pt''' , padding=True ).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [ 38_530, 210_703, 256_299, 1_410, 256_298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 826, 321, 671, 25_922, 256_299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 1_460, 339, 312, 19_014, 10_620, 758, 256_299, 2_355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
                [ 517, 256_299, 14_869, 281, 301, 256_298, 275, 119_983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 320, 256_299, 14_869, 281, 2_234, 289, 2_275, 333,61_391, 289, 256_298, 543, 256_297, 168_714, 329, 256_296,274, 1],
            ] )
        # fmt: on
        torch.testing.assert_allclose(input_ids , EXPECTED_IDS )
        generated_ids = model.generate(input_ids.to(torch_device ) )
        EXPECTED_FILLING = [
            '''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
            '''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
            '''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
            '''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
            '''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
        ]
        filling = tokenizer.batch_decode(generated_ids )
        self.assertEqual(filling , EXPECTED_FILLING )
| 84 | 1 |
from collections import defaultdict
from math import ceil, sqrt
def a_(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    """Project Euler 174: count hollow square lamina tile totals.

    A square lamina with outer width ``o`` and square hole width ``h``
    (same parity, ``1 <= h <= o - 2``) uses ``t = o*o - h*h`` tiles.

    Fixes: both parameters were named ``__magic_name__`` (duplicate-argument
    SyntaxError), the locals ``count`` / ``hole_width_lower_bound`` were bound
    to ``snake_case``, the upper count bound was hard-coded to 10 instead of
    the second parameter, and the ``__main__`` guard called an undefined
    ``solution``.

    Args:
        t_limit: maximum number of tiles per lamina (inclusive).
        n_limit: count only tile totals formable in at most this many ways.

    Returns:
        Number of tile totals ``t <= t_limit`` achievable by between 1 and
        ``n_limit`` distinct laminae.
    """
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            # smallest hole that keeps the tile count within t_limit
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        # the hole width must share the outer width's parity
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)


if __name__ == "__main__":
    print(f"{a_() = }")
| 84 |
import torch
from diffusers import DiffusionPipeline
class a_ ( a ):
    """Toy one-step pipeline used to exercise custom-pipeline loading.

    Runs the UNet and scheduler exactly once and returns an all-ones tensor
    shaped like the denoised sample; the subtraction keeps both modules in
    the call graph while making the output deterministic.

    Fixes: ``__init__`` had two parameters both named ``UpperCAmelCase__``
    (SyntaxError) and ``__call__`` read undefined names for the sample,
    timestep and intermediate outputs.
    """

    def __init__(self, unet, scheduler):
        """Register the UNet and scheduler as pipeline modules."""
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        """Run a single denoise step and return ones shaped like the sample."""
        sample = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1
        model_output = self.unet(sample, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, sample).prev_sample
        # scheduler_output - scheduler_output cancels; result is all ones
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)
        return result
| 84 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class a_ ( unittest.TestCase ):
    """Unit tests for the transformers activation registry.

    Fixes applied throughout: locals were bound to the placeholder name
    ``snake_case`` while being read by their real names, and the exception
    classes passed to ``assertRaises`` had been replaced by an undefined
    ``UpperCAmelCase__`` (restored to KeyError / AttributeError per the
    upstream test file).
    """

    def lowerCAmelCase( self : Optional[Any] ):
        """gelu_python must match torch's gelu and differ from gelu_new."""
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
        torch_builtin = get_activation('''gelu''' )
        self.assertTrue(torch.allclose(gelu_python(x ) , torch_builtin(x ) ) )
        self.assertFalse(torch.allclose(gelu_python(x ) , gelu_new(x ) ) )

    def lowerCAmelCase( self : Any ):
        """gelu_10 must clip at 10 and agree with gelu below the clip."""
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
        torch_builtin = get_activation('''gelu''' )
        gelu10 = get_activation('''gelu_10''' )
        y_gelu = torch_builtin(x )
        y_gelu_10 = gelu10(x )
        clipped_mask = torch.where(y_gelu_10 < 10.0 , 1 , 0 )
        self.assertTrue(torch.max(y_gelu_10 ).item() == 10.0 )
        self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_10 * clipped_mask ) )

    def lowerCAmelCase( self : int ):
        """Every registered activation resolves; unknown names raise KeyError."""
        get_activation('''gelu''' )
        get_activation('''gelu_10''' )
        get_activation('''gelu_fast''' )
        get_activation('''gelu_new''' )
        get_activation('''gelu_python''' )
        get_activation('''gelu_pytorch_tanh''' )
        get_activation('''linear''' )
        get_activation('''mish''' )
        get_activation('''quick_gelu''' )
        get_activation('''relu''' )
        get_activation('''sigmoid''' )
        get_activation('''silu''' )
        get_activation('''swish''' )
        get_activation('''tanh''' )
        with self.assertRaises(KeyError ):
            get_activation('''bogus''' )
        with self.assertRaises(KeyError ):
            get_activation(None )

    def lowerCAmelCase( self : int ):
        """Each get_activation call returns a distinct object (no shared state)."""
        act1 = get_activation('''gelu''' )
        act1.a = 1
        act2 = get_activation('''gelu''' )
        self.assertEqual(act1.a , 1 )
        with self.assertRaises(AttributeError ):
            _ = act2.a
| 84 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class a_ ( a ):
    """Processor that wraps a CLIP image processor and a CLIP tokenizer.

    Fixes applied throughout: ``__init__``/``__call__`` and the decode
    helpers had duplicate ``UpperCAmelCase__`` parameter names (SyntaxError),
    locals such as ``encoding`` / ``image_features`` / ``feature_extractor``
    were bound to ``snake_case``, and the warning category argument had been
    replaced by an undefined name (restored to FutureWarning).
    """

    # NOTE(review): these three class attributes all share the name ``A__``,
    # so only the last binding survives; they look like they were meant to be
    # ``attributes`` / ``image_processor_class`` / ``tokenizer_class`` (the
    # names ProcessorMixin consumes). Kept byte-identical to preserve the
    # visible interface — confirm against the upstream file.
    A__ : List[str] = ['image_processor', 'tokenizer']
    A__ : Any = 'CLIPImageProcessor'
    A__ : Optional[int] = ('CLIPTokenizer', 'CLIPTokenizerFast')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        """Validate and store the two sub-processors.

        Accepts the deprecated ``feature_extractor`` kwarg as a fallback for
        ``image_processor``.
        """
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize ``text`` and/or preprocess ``images``.

        Returns the tokenizer encoding (with ``pixel_values`` merged in when
        both modalities are given) or a BatchEncoding of image features only.
        """
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def lowerCAmelCase(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def lowerCAmelCase(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def lowerCAmelCase(self):
        """Union of tokenizer and image-processor input names, deduplicated."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def lowerCAmelCase(self):
        """Deprecated alias for ``image_processor_class``."""
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''',
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def lowerCAmelCase(self):
        """Deprecated alias for ``image_processor``."""
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''',
            FutureWarning,
        )
        return self.image_processor
| 84 | 1 |
import string
import numpy
def a_(a: int, b: int) -> int:
    """Euclid's algorithm: greatest common divisor of ``a`` and ``b``.

    Fix: both parameters were named ``__magic_name__`` (duplicate-argument
    SyntaxError) and the recursive call targeted the undefined name
    ``greatest_common_divisor``; an alias under that name is provided for
    the HillCipher class below, which calls it.
    """
    return b if a == 0 else a_(b % a, a)


# Descriptive alias used elsewhere in this module.
greatest_common_divisor = a_
class a_ :
    """Hill cipher over the 36-character alphabet ``A-Z0-9``.

    Text is split into column vectors of length ``break_key`` (the key
    matrix's order), multiplied by the key matrix modulo 36 to encrypt, and
    by the key's modular inverse to decrypt.

    Fixes applied throughout: class attributes and method names were restored
    from the names the body reads (``self.key_string`` / ``self.modulus`` /
    ``self.to_int`` and ``check_determinant`` / ``process_text`` / etc.), the
    vectorized lambda read an unbound ``x`` while its parameter was ``a``,
    ``to_int`` vectorized an undefined name instead of ``round``, and all
    locals were bound to placeholder names.
    """

    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters
    key_string = string.ascii_uppercase + string.digits

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    # element-wise rounding to the nearest integer
    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        """Store the key modulo 36 and validate its determinant."""
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        """Map an alphabet character to its numeric index (0..35)."""
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        """Map a numeric value (possibly float) back to its character."""
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        """Raise ValueError unless det(key) is coprime with 36."""
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        # stdlib gcd; det is non-negative here so results match Euclid's form
        from math import gcd

        if gcd(det, len(self.key_string)) != 1:
            msg = (
                F"determinant modular {req_l} of encryption key({det}) "
                F"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        """Uppercase, drop foreign characters, pad to a multiple of break_key."""
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)

    def encrypt(self, text: str) -> str:
        """Encrypt ``text`` block by block with the key matrix."""
        text = self.process_text(text.upper())
        encrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
                0
            ]
            encrypted_batch = "".join(
                self.replace_digits(num) for num in batch_encrypted
            )
            encrypted += encrypted_batch
        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        """Return the modular (mod 36) inverse of the encryption key."""
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        # brute-force the modular inverse of the determinant
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        # det_inv * det * inv(K) = det_inv * adj(K), integral modulo 36
        decrypt_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )
        return self.to_int(self.modulus(decrypt_key))

    def decrypt(self, text: str) -> str:
        """Decrypt ``text`` block by block with the inverse key matrix."""
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(
                self.replace_digits(num) for num in batch_decrypted
            )
            decrypted += decrypted_batch
        return decrypted


# Descriptive alias used by the interactive driver below.
HillCipher = a_
def a_() -> None:
    """Interactive driver: read a key matrix, then encrypt or decrypt text.

    Fixes: all locals were bound to ``snake_case`` while being read by their
    real names, and the ``__main__`` guard called an undefined ``main``.
    """
    n = int(input('''Enter the order of the encryption key: ''' ))
    hill_matrix = []
    print('''Enter each row of the encryption key with space separated integers''' )
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)
    # NOTE(review): ``HillCipher`` is the descriptive alias of the cipher
    # class defined above in this module.
    hc = HillCipher(numpy.array(hill_matrix))
    print('''Would you like to encrypt or decrypt some text? (1 or 2)''' )
    option = input('''\n1. Encrypt\n2. Decrypt\n''' )
    if option == "1":
        text_e = input('''What text would you like to encrypt?: ''' )
        print('''Your encrypted text is:''' )
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input('''What text would you like to decrypt?: ''' )
        print('''Your decrypted text is:''' )
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    a_()
| 84 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_a : Optional[Any] = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
_a : str = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
_a : List[Any] = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. 
Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
    """datasets.Metric wrapper around the official MAUVE implementation."""

    def lowerCAmelCase( self : Any ):
        """Return the metric's MetricInfo (features, citation, references)."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''' , id='''sequence''' ),
                    '''references''': datasets.Value('''string''' , id='''sequence''' ),
                } ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
                '''https://arxiv.org/abs/2102.01454''',
                '''https://github.com/krishnap25/mauve''',
            ] , )

    def lowerCAmelCase(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1_024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        """Compute MAUVE between predictions and references.

        Fixes: every parameter shared the name ``UpperCAmelCase__``
        (duplicate-argument SyntaxError; names restored from the upstream
        metric) and the result local was bound to ``snake_case`` while the
        return statement read ``out``.
        """
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
| 84 | 1 |
import requests
from bs4 import BeautifulSoup  # fix: BeautifulSoup lives in ``bs4``, not ``bsa``
def a_(base_url: str, params: dict) -> str:
    """Fetch a Google Scholar lookup page and return one anchor's text.

    Fixes: both parameters were named ``__magic_name__`` (duplicate-argument
    SyntaxError) and the locals ``soup`` / ``div`` / ``anchors`` were bound
    to ``snake_case`` while being read by their real names.

    Args:
        base_url: the scholar_lookup endpoint URL.
        params: query parameters (title, journal, year, ...).

    Returns:
        Text of the third anchor in the result's ``gs_fl`` footer —
        presumably the "Cited by N" link; confirm against the page markup.
    """
    soup = BeautifulSoup(requests.get(base_url, params=params).content, '''html.parser''')
    div = soup.find('''div''', attrs={'''class''': '''gs_ri'''})
    anchors = div.find('''div''', attrs={'''class''': '''gs_fl'''}).find_all('''a''')
    return anchors[2].get_text()
if __name__ == "__main__":
    _a = {
        'title': (
            'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
            'capacitance, volumetric capacitance, and energy density'
        ),
        'journal': 'Chem. Mater.',
        'volume': 30,
        'pages': '3979-3990',
        'year': 2_018,
        'hl': 'en',
    }
    # Fix: the original called an undefined ``get_citation`` with an
    # undefined ``params``; the scraper above is ``a_`` and the query dict
    # is the module-level ``_a``.
    print(a_('https://scholar.google.com/scholar_lookup', params=_a))
| 84 |
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def a_(name: str) -> str:
    """Translate a ViTMAE checkpoint parameter name to the HF naming scheme.

    Rules are applied in order, each rewriting part of the name; later rules
    see the result of earlier ones.

    Fix: every ``name.replace`` result was assigned to the dead placeholder
    ``snake_case`` instead of back to ``name``, so the function returned its
    input unchanged.
    """
    if "cls_token" in name:
        name = name.replace('''cls_token''' , '''vit.embeddings.cls_token''' )
    if "mask_token" in name:
        name = name.replace('''mask_token''' , '''decoder.mask_token''' )
    if "decoder_pos_embed" in name:
        name = name.replace('''decoder_pos_embed''' , '''decoder.decoder_pos_embed''' )
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace('''pos_embed''' , '''vit.embeddings.position_embeddings''' )
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''' , '''vit.embeddings.patch_embeddings.projection''' )
    if "patch_embed.norm" in name:
        name = name.replace('''patch_embed.norm''' , '''vit.embeddings.norm''' )
    if "decoder_blocks" in name:
        name = name.replace('''decoder_blocks''' , '''decoder.decoder_layers''' )
    if "blocks" in name:
        name = name.replace('''blocks''' , '''vit.encoder.layer''' )
    if "attn.proj" in name:
        name = name.replace('''attn.proj''' , '''attention.output.dense''' )
    if "attn" in name:
        name = name.replace('''attn''' , '''attention.self''' )
    if "norm1" in name:
        name = name.replace('''norm1''' , '''layernorm_before''' )
    if "norm2" in name:
        name = name.replace('''norm2''' , '''layernorm_after''' )
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''' , '''output.dense''' )
    if "decoder_embed" in name:
        name = name.replace('''decoder_embed''' , '''decoder.decoder_embed''' )
    if "decoder_norm" in name:
        name = name.replace('''decoder_norm''' , '''decoder.decoder_norm''' )
    if "decoder_pred" in name:
        name = name.replace('''decoder_pred''' , '''decoder.decoder_pred''' )
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace('''norm.weight''' , '''vit.layernorm.weight''' )
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace('''norm.bias''' , '''vit.layernorm.bias''' )
    return name


# Descriptive alias used by the state-dict conversion below.
rename_key = a_
def a_ ( __magic_name__ , __magic_name__ ) -> str:
    """Split fused ``qkv`` tensors from a timm-style MAE state dict into
    separate query/key/value slices for the HF ViT layout.

    NOTE(review): both parameters are named ``__magic_name__`` — duplicate
    argument names are a SyntaxError, so this function cannot be defined as
    written. Every ``snake_case`` assignment below also discards its result,
    so names such as ``orig_state_dict``/``key_split``/``config``/``dim``/
    ``val`` referenced later are never bound. Presumably the parameters were
    ``orig_state_dict`` and ``config`` — confirm against the upstream script.
    """
    for key in orig_state_dict.copy().keys():
        snake_case : Union[str, Any] = orig_state_dict.pop(__magic_name__ )
        if "qkv" in key:
            # Fused qkv: slice rows into thirds (q = [:dim], k = [dim:2*dim], v = [-dim:]).
            snake_case : Optional[int] = key.split('''.''' )
            snake_case : int = int(key_split[1] )
            if "decoder_blocks" in key:
                # Decoder layers use the (smaller) decoder hidden size.
                snake_case : List[str] = config.decoder_hidden_size
                snake_case : List[Any] = '''decoder.decoder_layers.'''
                if "weight" in key:
                    snake_case : str = val[:dim, :]
                    snake_case : Optional[Any] = val[dim : dim * 2, :]
                    snake_case : Any = val[-dim:, :]
                elif "bias" in key:
                    snake_case : Optional[Any] = val[:dim]
                    snake_case : List[Any] = val[dim : dim * 2]
                    snake_case : List[Any] = val[-dim:]
            else:
                # Encoder layers use the full hidden size.
                snake_case : Optional[int] = config.hidden_size
                snake_case : Tuple = '''vit.encoder.layer.'''
                if "weight" in key:
                    snake_case : Optional[Any] = val[:dim, :]
                    snake_case : str = val[dim : dim * 2, :]
                    snake_case : Union[str, Any] = val[-dim:, :]
                elif "bias" in key:
                    snake_case : Tuple = val[:dim]
                    snake_case : int = val[dim : dim * 2]
                    snake_case : Optional[Any] = val[-dim:]
        else:
            # Non-qkv tensors are kept as-is (upstream re-inserted them under a renamed key).
            snake_case : Optional[Any] = val
    return orig_state_dict
def a_ ( __magic_name__ , __magic_name__ ) -> Any:
    """Download a ViT-MAE checkpoint, convert it to the HF format, sanity-check
    one forward pass against hard-coded logits, then save model + processor.

    NOTE(review): the two parameters (checkpoint URL and dump folder upstream)
    share the name ``__magic_name__`` — a SyntaxError as written — and the
    ``snake_case`` assignments drop their results, so ``config``/``model``/
    ``logits`` etc. referenced below are never bound.
    """
    snake_case : List[str] = ViTMAEConfig()
    # Scale the default (base) config up for the "large" / "huge" variants.
    if "large" in checkpoint_url:
        snake_case : str = 1_024
        snake_case : Tuple = 4_096
        snake_case : Optional[Any] = 24
        snake_case : List[Any] = 16
    elif "huge" in checkpoint_url:
        snake_case : Tuple = 14
        snake_case : int = 1_280
        snake_case : Dict = 5_120
        snake_case : Tuple = 32
        snake_case : Optional[Any] = 16
    snake_case : Optional[Any] = ViTMAEForPreTraining(__magic_name__ )
    snake_case : Optional[Any] = torch.hub.load_state_dict_from_url(__magic_name__ , map_location='''cpu''' )['''model''']
    snake_case : int = ViTMAEImageProcessor(size=config.image_size )
    snake_case : Dict = convert_state_dict(__magic_name__ , __magic_name__ )
    model.load_state_dict(__magic_name__ )
    model.eval()
    # Reference image used for the numerical sanity check.
    snake_case : Tuple = '''https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg'''
    snake_case : List[Any] = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw )
    snake_case : Dict = ViTMAEImageProcessor(size=config.image_size )
    snake_case : str = image_processor(images=__magic_name__ , return_tensors='''pt''' )
    # forward pass — fixed seed so MAE's random masking is reproducible
    torch.manual_seed(2 )
    snake_case : Union[str, Any] = model(**__magic_name__ )
    snake_case : Optional[Any] = outputs.logits
    # Expected top-left 3x3 logits for each checkpoint variant.
    if "large" in checkpoint_url:
        snake_case : Any = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]] )
    elif "huge" in checkpoint_url:
        snake_case : List[Any] = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]] )
    else:
        snake_case : Dict = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]] )
    # verify logits
    assert torch.allclose(logits[0, :3, :3] , __magic_name__ , atol=1e-4 )
    print(F"Saving model to {pytorch_dump_folder_path}" )
    model.save_pretrained(__magic_name__ )
    print(F"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(__magic_name__ )
if __name__ == "__main__":
_a : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
_a : str = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 84 | 1 |
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
_a : List[Any] = logging.get_logger(__name__)

# Maps fairseq wav2vec2 parameter-name fragments (keys) to their HF module
# paths (values); '*' is a per-layer index substituted at load time.
_a : Union[str, Any] = {
    'post_extract_proj': 'feature_projection.projection',
    'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
    'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
    'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
    'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
    'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
    'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
    'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
    'fc2': 'encoder.layers.*.feed_forward.output_dense',
    'final_layer_norm': 'encoder.layers.*.final_layer_norm',
    'encoder.layer_norm': 'encoder.layer_norm',
    'w2v_model.layer_norm': 'feature_projection.layer_norm',
    'quantizer.weight_proj': 'quantizer.weight_proj',
    'quantizer.vars': 'quantizer.codevectors',
    'project_q': 'project_q',
    'final_proj': 'project_hid',
    'w2v_encoder.proj': 'lm_head',
    'mask_emb': 'masked_spec_embed',
}
# HF module names that live directly on the top-level model (not under
# `wav2vec2.`), used when resolving target attributes.
_a : Tuple = [
    'lm_head',
    'quantizer.weight_proj',
    'quantizer.codevectors',
    'project_q',
    'project_hid',
]
def a_ ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Any:
    """Walk a dotted ``key`` down the HF model, shape-check ``value`` against
    the target parameter, and copy it in according to ``weight_type``.

    NOTE(review): all five parameters share the name ``__magic_name__``
    (upstream: key, value, full_name, weight_type, hf_pointer) — a SyntaxError
    as written — and the ``snake_case`` assignments discard the tensors rather
    than writing them onto ``hf_pointer``.
    """
    for attribute in key.split('''.''' ):
        snake_case : int = getattr(__magic_name__ , __magic_name__ )
    if weight_type is not None:
        snake_case : Optional[int] = getattr(__magic_name__ , __magic_name__ ).shape
    else:
        snake_case : List[Any] = hf_pointer.shape
    # Guard against silently loading a mis-shaped tensor.
    assert hf_shape == value.shape, (
        F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        F" {value.shape} for {full_name}"
    )
    # Dispatch on the parameter kind (plain weight, weight-norm g/v, bias).
    if weight_type == "weight":
        snake_case : str = value
    elif weight_type == "weight_g":
        snake_case : List[Any] = value
    elif weight_type == "weight_v":
        snake_case : Tuple = value
    elif weight_type == "bias":
        snake_case : List[Any] = value
    else:
        snake_case : Union[str, Any] = value
    logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def a_ ( __magic_name__ , __magic_name__ ) -> List[Any]:
    """Copy every fairseq wav2vec2 tensor into the HF model, routing conv
    feature-extractor and adapter tensors to dedicated loaders and everything
    else through the MAPPING table; warn about anything left over.

    NOTE(review): both parameters are named ``__magic_name__`` (upstream:
    fairseq_model, hf_model) — a SyntaxError as written — and the
    ``snake_case`` assignments drop their results, so ``unused_weights``/
    ``fairseq_dict``/``is_used`` referenced below are never bound.
    """
    snake_case : Dict = []
    snake_case : Optional[Any] = fairseq_model.state_dict()
    snake_case : Optional[Any] = hf_model.feature_extractor
    snake_case : Tuple = hf_model.adapter
    for name, value in fairseq_dict.items():
        snake_case : Tuple = False
        if "conv_layers" in name:
            # Convolutional feature-extractor tensors have their own loader.
            load_conv_layer(
                __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , hf_model.config.feat_extract_norm == '''group''' , )
            snake_case : Optional[Any] = True
        elif any(x in name for x in ['''adaptor''', '''w2v_encoder.proj.''', '''w2v_proj_ln.'''] ):
            # Adapter / projection tensors likewise.
            load_adapter(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
            snake_case : Any = True
        else:
            # Everything else is matched against the MAPPING table.
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
                    snake_case : Tuple = True
                    if "*" in mapped_key:
                        # Fill in the per-layer index for encoder-layer keys.
                        snake_case : Optional[int] = name.split(__magic_name__ )[0].split('''.''' )[-2]
                        snake_case : Dict = mapped_key.replace('''*''' , __magic_name__ )
                    if "weight_g" in name:
                        snake_case : Tuple = '''weight_g'''
                    elif "weight_v" in name:
                        snake_case : Tuple = '''weight_v'''
                    elif "bias" in name:
                        snake_case : Union[str, Any] = '''bias'''
                    elif "weight" in name:
                        snake_case : Union[str, Any] = '''weight'''
                    else:
                        snake_case : List[Any] = None
                    set_recursively(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
                continue
        if not is_used:
            unused_weights.append(__magic_name__ )
    logger.warning(F"Unused weights: {unused_weights}" )
def a_ ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[Any]:
    """Load one conv feature-extractor tensor (conv weight/bias for
    ``type_id == 0``, layer-norm weight/bias for ``type_id == 2``) into the HF
    feature extractor, after a shape check.

    NOTE(review): all five parameters are named ``__magic_name__`` (upstream:
    full_name, value, feature_extractor, unused_weights, use_group_norm) — a
    SyntaxError as written — and the ``snake_case`` assignments discard
    ``name``/``items``/``layer_id``/``type_id`` and the copied tensors.
    """
    snake_case : Optional[Any] = full_name.split('''conv_layers.''' )[-1]
    snake_case : Tuple = name.split('''.''' )
    snake_case : List[Any] = int(items[0] )
    snake_case : int = int(items[1] )
    if type_id == 0:
        # Convolution weight / bias.
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F"{full_name} has size {value.shape}, but"
                F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            snake_case : int = value
            logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F"{full_name} has size {value.shape}, but"
                F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            snake_case : List[Any] = value
            logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        # Layer-norm weight / bias (group norm lives only on layer 0).
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            snake_case : List[str] = value
            logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F"{full_name} has size {value.shape}, but"
                F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            snake_case : Any = value
            logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
    else:
        unused_weights.append(__magic_name__ )
def a_ ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Tuple:
    """Load one adapter tensor into the HF adapter: the projection /
    projection-layer-norm tensors when ``"adaptor"`` is absent from the name,
    otherwise the indexed adapter conv layers.

    NOTE(review): all four parameters are named ``__magic_name__`` (upstream:
    full_name, value, adapter, unused_weights) — a SyntaxError as written —
    and the ``snake_case`` assignments discard ``items``/``layer_id`` and the
    copied tensors.
    """
    snake_case : Any = full_name.split('''adaptor.''' )[-1]
    snake_case : Union[str, Any] = name.split('''.''' )
    # Adapter conv layers are indexed; projection tensors are not.
    if items[1].isdigit():
        snake_case : Tuple = int(items[1] )
    else:
        snake_case : int = None
    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), F"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
                snake_case : Dict = value
                logger.info(F"Adapter proj layer norm bias was initialized from {full_name}." )
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), F"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
                snake_case : int = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), F"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
                snake_case : List[str] = value
                logger.info(F"Adapter proj layer bias was initialized from {full_name}." )
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), F"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
                snake_case : Optional[Any] = value
                logger.info(F"Adapter proj layer weight was initialized from {full_name}." )
    elif isinstance(__magic_name__ , __magic_name__ ):
        # Indexed adapter conv layer.
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), F"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
            snake_case : List[Any] = value
            logger.info(F"Adapter layer {layer_id} bias was initialized from {full_name}." )
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), F"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
            snake_case : int = value
            logger.info(F"Adapter layer {layer_id} bias was initialized from {full_name}." )
    else:
        unused_weights.append(__magic_name__ )
def a_ ( __magic_name__ ) -> Optional[Any]:
"""simple docstring"""
snake_case , snake_case : Optional[Any] = emb.weight.shape
snake_case : List[str] = nn.Linear(__magic_name__ , __magic_name__ , bias=__magic_name__ )
snake_case : Tuple = emb.weight.data
return lin_layer
@torch.no_grad()
def a_ ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ) -> Any:
    """Assemble a SpeechEncoderDecoderModel from a fairseq wav2vec2 encoder and
    an mBART-50 decoder checkpoint, wire up the tokenizer/special tokens, and
    save model + feature extractor.

    NOTE(review): all eleven parameters share the name ``__magic_name__`` — a
    SyntaxError as written — and the ``snake_case`` assignments drop their
    results, so ``config``/``model``/``hf_wavavec`` etc. referenced below are
    never bound.
    """
    snake_case : int = WavaVecaConfig.from_pretrained(
        __magic_name__ , add_adapter=__magic_name__ , adapter_stride=__magic_name__ , adapter_kernel_size=__magic_name__ , use_auth_token=__magic_name__ , output_hidden_size=__magic_name__ , )
    snake_case : Union[str, Any] = MBartConfig.from_pretrained(__magic_name__ )
    # load model
    snake_case , snake_case , snake_case : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={
            '''config_yaml''': config_yaml_path,
            '''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ),
            '''w2v_path''': checkpoint_path,
            '''load_pretrained_decoder_from''': None,
        } , )
    snake_case : List[Any] = model[0].eval()
    # load feature extractor
    snake_case : str = WavaVecaFeatureExtractor.from_pretrained(__magic_name__ , use_auth_token=__magic_name__ )
    # set weights for wav2vec2 encoder
    snake_case : Dict = WavaVecaModel(__magic_name__ )
    recursively_load_weights_wavaveca(model.encoder , __magic_name__ )
    # load decoder weights
    snake_case : Optional[int] = MBartForCausalLM(__magic_name__ )
    snake_case , snake_case : Tuple = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=__magic_name__ )
    logger.warning(F"The following keys are missing when loading the decoder weights: {missing_keys}" )
    logger.warning(F"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" )
    snake_case : Optional[Any] = SpeechEncoderDecoderModel(encoder=__magic_name__ , decoder=__magic_name__ )
    snake_case : Union[str, Any] = False
    snake_case : Union[str, Any] = MBartaaTokenizer(__magic_name__ )
    tokenizer.save_pretrained(__magic_name__ )
    # Propagate tokenizer special-token ids into the combined config.
    snake_case : List[Any] = hf_wavavec.config.to_dict()
    snake_case : List[Any] = tokenizer.pad_token_id
    snake_case : Dict = tokenizer.bos_token_id
    snake_case : List[str] = tokenizer.eos_token_id
    snake_case : str = '''mbart50'''
    snake_case : Optional[int] = '''wav2vec2'''
    snake_case : str = tokenizer.eos_token_id
    snake_case : Optional[int] = 250_004
    snake_case : Union[str, Any] = tokenizer.eos_token_id
    snake_case : List[Any] = SpeechEncoderDecoderConfig.from_dict(__magic_name__ )
    hf_wavavec.save_pretrained(__magic_name__ )
    feature_extractor.save_pretrained(__magic_name__ )
if __name__ == "__main__":
_a : Tuple = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_yaml_path', default=None, type=str, help='Path to yaml file of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-xls-r-1b',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/mbart-large-50-one-to-many-mmt',
type=str,
help='Path to hf decoder checkpoint config',
)
parser.add_argument('--add_adapter', default=True, type=bool, help='whethere to add model adapter layers')
parser.add_argument('--adapter_stride', default=2, type=int, help='stride of adapter layers')
parser.add_argument('--adapter_kernel_size', default=3, type=int, help='kernel size of adapter layers')
parser.add_argument('--encoder_output_dim', default=1_024, type=int, help='encoder output dim')
parser.add_argument('--start_token_id', default=250_004, type=int, help='`decoder_start_token_id` of model config')
_a : List[Any] = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 84 |
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# Batch-size constants for the Accelerate example (per-device training batch
# ceiling and evaluation batch size — see the header comment above).
_a : Optional[Any] = 16
_a : Union[str, Any] = 32
def a_ ( __magic_name__ , __magic_name__ = 16 ) -> Dict:
    """Build tokenized GLUE/MRPC train and eval DataLoaders for bert-base-cased.

    NOTE(review): both parameters are named ``__magic_name__`` (upstream:
    accelerator, batch_size) — a SyntaxError as written — and the
    ``snake_case`` assignments drop their results, so ``tokenizer``/
    ``datasets``/``accelerator`` etc. referenced below are never bound.
    """
    snake_case : Tuple = AutoTokenizer.from_pretrained('''bert-base-cased''' )
    snake_case : Any = load_dataset('''glue''' , '''mrpc''' )
    def tokenize_function(__magic_name__ ):
        # max_length=None => use the model max length (it's actually the default)
        snake_case : Union[str, Any] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__magic_name__ , max_length=__magic_name__ )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        snake_case : Union[str, Any] = datasets.map(
            __magic_name__ , batched=__magic_name__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    snake_case : Optional[Any] = tokenized_datasets.rename_column('''label''' , '''labels''' )
    def collate_fn(__magic_name__ ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        snake_case : str = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            snake_case : Tuple = 16
        elif accelerator.mixed_precision != "no":
            snake_case : Dict = 8
        else:
            snake_case : Union[str, Any] = None
        return tokenizer.pad(
            __magic_name__ , padding='''longest''' , max_length=__magic_name__ , pad_to_multiple_of=__magic_name__ , return_tensors='''pt''' , )
    # Instantiate dataloaders.
    snake_case : str = DataLoader(
        tokenized_datasets['''train'''] , shuffle=__magic_name__ , collate_fn=__magic_name__ , batch_size=__magic_name__ )
    snake_case : List[str] = DataLoader(
        tokenized_datasets['''validation'''] , shuffle=__magic_name__ , collate_fn=__magic_name__ , batch_size=__magic_name__ )
    return train_dataloader, eval_dataloader
# For testing only
# When TESTING_MOCKED_DATALOADERS=1, CI swaps the real dataloader builder for
# a lightweight mock (the F811 redefinition of `_a` is intentional).
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders
    _a : Optional[int] = mocked_dataloaders  # noqa: F811
def a_ ( __magic_name__ , __magic_name__ ) -> Optional[Any]:
    """Train bert-base-cased on GLUE MRPC inside an OOM-safe inner loop that
    retries with smaller batch sizes (`find_executable_batch_size`).

    NOTE(review): both parameters are named ``__magic_name__`` (upstream:
    config, args) — a SyntaxError as written — and the ``snake_case``
    assignments drop their results, so ``accelerator``/``lr``/``metric`` etc.
    referenced below are never bound. The ``nonlocal accelerator`` inside the
    inner loop also has no bound enclosing name to refer to.
    """
    if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __magic_name__ ) == "1":
        snake_case : Optional[int] = 2
    # Initialize accelerator
    snake_case : int = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    snake_case : Dict = config['''lr''']
    snake_case : Any = int(config['''num_epochs'''] )
    snake_case : List[str] = int(config['''seed'''] )
    snake_case : List[Any] = int(config['''batch_size'''] )
    snake_case : Tuple = evaluate.load('''glue''' , '''mrpc''' )
    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=__magic_name__ )
    def inner_training_loop(__magic_name__ ):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()
        # Then we can declare the model, optimizer, and everything else:
        set_seed(__magic_name__ )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        snake_case : str = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__magic_name__ )
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        snake_case : Optional[int] = model.to(accelerator.device )
        # Instantiate optimizer
        snake_case : Optional[int] = AdamW(params=model.parameters() , lr=__magic_name__ )
        snake_case , snake_case : List[Any] = get_dataloaders(__magic_name__ , __magic_name__ )
        # Instantiate scheduler
        snake_case : int = get_linear_schedule_with_warmup(
            optimizer=__magic_name__ , num_warmup_steps=100 , num_training_steps=(len(__magic_name__ ) * num_epochs) , )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        snake_case , snake_case , snake_case , snake_case , snake_case : Tuple = accelerator.prepare(
            __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
        # Now we train the model
        for epoch in range(__magic_name__ ):
            model.train()
            for step, batch in enumerate(__magic_name__ ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                snake_case : int = model(**__magic_name__ )
                snake_case : Optional[int] = outputs.loss
                accelerator.backward(__magic_name__ )
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(__magic_name__ ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                with torch.no_grad():
                    snake_case : List[str] = model(**__magic_name__ )
                snake_case : List[Any] = outputs.logits.argmax(dim=-1 )
                snake_case , snake_case : Dict = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
                metric.add_batch(
                    predictions=__magic_name__ , references=__magic_name__ , )
            snake_case : Tuple = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(F"epoch {epoch}:" , __magic_name__ )
    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def a_ ( ):
    """Parse the example's CLI flags and launch training.

    Fix: the original assigned the parser, the parsed args and the
    hyper-parameter dict to throwaway names and then referenced the unbound
    names ``parser``/``args``/``config`` (NameError); bind them explicitly.
    (The broken ``-> Union[...]`` annotation referencing an unimported name
    is dropped — the function returns None.)
    """
    parser = argparse.ArgumentParser(description='''Simple example of training script.''' )
    parser.add_argument(
        '''--mixed_precision''' , type=str , default=None , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''' , )
    parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
    args = parser.parse_args()
    # Fixed hyper-parameters for this example.
    config = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    # `training_function` is the training entry point defined elsewhere in this module.
    training_function(config , args )
if __name__ == "__main__":
main()
| 84 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : int = logging.get_logger(__name__)

# Canonical SEW-D checkpoint(s) and the URL of their hosted config files.
_a : str = {
    'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class a_ ( a ):
    """Configuration class for SEW-D models (stores architecture, feature
    extractor, SpecAugment and CTC settings).

    NOTE(review): every keyword parameter of ``__init__`` is named
    ``UpperCAmelCase__`` — duplicate argument names are a SyntaxError, so
    this class cannot be defined as written; the assignments below reference
    the upstream parameter names (hidden_size, feat_extract_norm, ...) that
    are never bound here.
    """
    # Model-type key used by the auto classes.
    A__ : str = 'sew-d'
    def __init__( self : Tuple , UpperCAmelCase__ : int=32 , UpperCAmelCase__ : Tuple=768 , UpperCAmelCase__ : Tuple=12 , UpperCAmelCase__ : Dict=12 , UpperCAmelCase__ : Any=3_072 , UpperCAmelCase__ : List[str]=2 , UpperCAmelCase__ : Optional[int]=512 , UpperCAmelCase__ : Tuple=256 , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : str=True , UpperCAmelCase__ : List[str]=("p2c", "c2p") , UpperCAmelCase__ : int="layer_norm" , UpperCAmelCase__ : List[str]="gelu_python" , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : str=0.1 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : List[Any]=0.02 , UpperCAmelCase__ : Tuple=1e-7 , UpperCAmelCase__ : Union[str, Any]=1e-5 , UpperCAmelCase__ : List[Any]="group" , UpperCAmelCase__ : Union[str, Any]="gelu" , UpperCAmelCase__ : Dict=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , UpperCAmelCase__ : Tuple=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , UpperCAmelCase__ : Any=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , UpperCAmelCase__ : Optional[Any]=False , UpperCAmelCase__ : List[Any]=128 , UpperCAmelCase__ : List[str]=16 , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Tuple=0.05 , UpperCAmelCase__ : Tuple=10 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : List[str]=0.0 , UpperCAmelCase__ : Dict=10 , UpperCAmelCase__ : List[str]=0 , UpperCAmelCase__ : List[Any]="mean" , UpperCAmelCase__ : Optional[Any]=False , UpperCAmelCase__ : Dict=False , UpperCAmelCase__ : Union[str, Any]=256 , UpperCAmelCase__ : List[Any]=0 , UpperCAmelCase__ : Tuple=1 , UpperCAmelCase__ : str=2 , **UpperCAmelCase__ : Optional[int] , ):
        """Store all architecture / training hyper-parameters on the config."""
        super().__init__(**UpperCAmelCase__ , pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ )
        # Transformer encoder dimensions.
        snake_case : Dict = hidden_size
        # Convolutional feature-extractor settings.
        snake_case : List[str] = feat_extract_norm
        snake_case : List[str] = feat_extract_activation
        snake_case : int = list(UpperCAmelCase__ )
        snake_case : Optional[Any] = list(UpperCAmelCase__ )
        snake_case : Any = list(UpperCAmelCase__ )
        snake_case : Union[str, Any] = conv_bias
        snake_case : Dict = num_conv_pos_embeddings
        snake_case : List[str] = num_conv_pos_embedding_groups
        snake_case : Any = len(self.conv_dim )
        snake_case : Optional[int] = num_hidden_layers
        snake_case : Dict = intermediate_size
        snake_case : List[str] = squeeze_factor
        # DeBERTa-style relative attention settings.
        snake_case : Optional[Any] = max_position_embeddings
        snake_case : str = position_buckets
        snake_case : str = share_att_key
        snake_case : Optional[int] = relative_attention
        snake_case : Dict = norm_rel_ebd
        snake_case : str = list(UpperCAmelCase__ )
        snake_case : Optional[int] = hidden_act
        snake_case : Union[str, Any] = num_attention_heads
        # Dropout / regularization settings.
        snake_case : str = hidden_dropout
        snake_case : Optional[int] = attention_dropout
        snake_case : Any = activation_dropout
        snake_case : Optional[Any] = feat_proj_dropout
        snake_case : List[Any] = final_dropout
        snake_case : Any = layer_norm_eps
        snake_case : Optional[int] = feature_layer_norm_eps
        snake_case : str = initializer_range
        snake_case : Tuple = vocab_size
        # All three conv specs must describe the same number of layers.
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect.'''
                '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
                F"but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"
                F"= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        snake_case : List[str] = apply_spec_augment
        snake_case : Optional[int] = mask_time_prob
        snake_case : Union[str, Any] = mask_time_length
        snake_case : List[Any] = mask_time_min_masks
        snake_case : Dict = mask_feature_prob
        snake_case : Optional[Any] = mask_feature_length
        snake_case : int = mask_feature_min_masks
        # ctc loss
        snake_case : Optional[Any] = ctc_loss_reduction
        snake_case : int = ctc_zero_infinity
        # sequence classification
        snake_case : str = use_weighted_layer_sum
        snake_case : List[Any] = classifier_proj_size
    @property
    def lowerCAmelCase( self : Union[str, Any] ):
        """Total downsampling stride of the conv feature extractor (product of all conv strides)."""
        return functools.reduce(operator.mul , self.conv_stride , 1 )
| 84 |
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
_a : Dict = logging.get_logger(__name__)
def a_ ( box , width , height ) -> list:
    """Normalize an absolute pixel bounding box to the 0-1000 coordinate
    space used by LayoutLM-style models.

    Args:
        box: ``(left, top, right, bottom)`` in pixels.
        width: image width in pixels.
        height: image height in pixels.

    Returns:
        The box with x-coordinates scaled by ``1000 / width`` and
        y-coordinates by ``1000 / height``, truncated to ints.
    """
    # Fix: the three parameters were all named ``__magic_name__`` (a
    # SyntaxError); the body already referred to box/width/height, so those
    # are restored as the parameter names.
    return [
        int(1_000 * (box[0] / width) ),
        int(1_000 * (box[1] / height) ),
        int(1_000 * (box[2] / width) ),
        int(1_000 * (box[3] / height) ),
    ]
def a_ ( image , lang , tesseract_config = None ) -> tuple:
    """Run Tesseract OCR on ``image`` and return ``(words, normalized_boxes)``.

    Args:
        image: input image (anything ``to_pil_image`` accepts).
        lang: Tesseract language code passed to ``pytesseract``.
        tesseract_config: extra Tesseract CLI flags; ``None`` means none.

    Returns:
        A tuple of the non-empty OCR words and their bounding boxes normalized
        to the 0-1000 LayoutLM coordinate space.
    """
    # Fix: the three parameters were all named ``__magic_name__`` (a
    # SyntaxError) and every intermediate was assigned to a throwaway name,
    # leaving later references unbound; the data flow below is reconstructed
    # from the visible statements. The `-> str` annotation was also wrong —
    # the function returns a (words, boxes) tuple.
    tesseract_config = tesseract_config if tesseract_config is not None else ''''''
    # apply OCR
    pil_image = to_pil_image(image )
    image_width , image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image , lang=lang , output_type='''dict''' , config=tesseract_config )
    words , left , top , width , height = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
    # filter empty words and corresponding coordinates (set for O(1) lookups)
    irrelevant_indices = {idx for idx, word in enumerate(words ) if not word.strip()}
    words = [word for idx, word in enumerate(words ) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left ) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top ) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width ) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height ) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left , top , width , height ):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box )
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box , image_width , image_height ) )
    assert len(words ) == len(normalized_boxes ), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class a_ ( a ):
    """LayoutLM-style image processor: resizes document images and optionally
    runs Tesseract OCR to extract words and normalized bounding boxes."""
    # Tensor input name expected by the model.
    A__ : int = ['pixel_values']
    def __init__( self , do_resize : bool = True , size : Dict[str, int] = None , resample : PILImageResampling = PILImageResampling.BILINEAR , apply_ocr : bool = True , ocr_lang : Optional[str] = None , tesseract_config : Optional[str] = "" , **kwargs , ):
        """Store the preprocessing defaults.

        Fix: the original gave every keyword parameter the same name
        ``UpperCAmelCase__`` (a SyntaxError) and assigned each value to a
        throwaway name instead of an attribute — the sibling ``resize`` /
        ``preprocess`` methods read ``self.do_resize``/``self.size``/... so
        the attributes are restored here.
        """
        super().__init__(**kwargs )
        # Default to a 224x224 target if no size is given.
        size = size if size is not None else {'''height''': 224, '''width''': 224}
        size = get_size_dict(size )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config
def lowerCAmelCase( self : List[str] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Dict[str, int] , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Any , ):
"""simple docstring"""
snake_case : Dict = get_size_dict(UpperCAmelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(F"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}" )
snake_case : Tuple = (size['''height'''], size['''width'''])
return resize(UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def lowerCAmelCase( self : Tuple , UpperCAmelCase__ : ImageInput , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : PILImageResampling = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase__ : List[Any] , ):
"""simple docstring"""
snake_case : Tuple = do_resize if do_resize is not None else self.do_resize
snake_case : List[Any] = size if size is not None else self.size
snake_case : Tuple = get_size_dict(UpperCAmelCase__ )
snake_case : str = resample if resample is not None else self.resample
snake_case : Optional[int] = apply_ocr if apply_ocr is not None else self.apply_ocr
snake_case : Any = ocr_lang if ocr_lang is not None else self.ocr_lang
snake_case : Optional[int] = tesseract_config if tesseract_config is not None else self.tesseract_config
snake_case : List[str] = make_list_of_images(UpperCAmelCase__ )
if not valid_images(UpperCAmelCase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
# All transformations expect numpy arrays.
snake_case : Any = [to_numpy_array(UpperCAmelCase__ ) for image in images]
if apply_ocr:
requires_backends(self , '''pytesseract''' )
snake_case : Optional[int] = []
snake_case : Union[str, Any] = []
for image in images:
snake_case , snake_case : List[Any] = apply_tesseract(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
words_batch.append(UpperCAmelCase__ )
boxes_batch.append(UpperCAmelCase__ )
if do_resize:
snake_case : Any = [self.resize(image=UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
snake_case : int = [flip_channel_order(UpperCAmelCase__ ) for image in images]
snake_case : Optional[Any] = [to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__ ) for image in images]
snake_case : List[Any] = BatchFeature(data={'''pixel_values''': images} , tensor_type=UpperCAmelCase__ )
if apply_ocr:
snake_case : Dict = words_batch
snake_case : Dict = boxes_batch
return data
| 84 | 1 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class a_ :
    """Builds tiny random LlamaConfig/input tensors and runs shape checks for the
    tests below (originally ``LlamaModelTester``).

    NOTE(review): automated renaming mangled this class — multi-parameter methods use
    the duplicated parameter name ``UpperCAmelCase__`` (a duplicate-argument
    SyntaxError), and locals are assigned to a throwaway ``snake_case`` name while
    later lines read the original names (``parent``, ``config``, ``input_ids`` ...).
    The docstrings below describe the intended behaviour; the original identifiers
    must be restored before this tester can run.
    """

    def __init__( self : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any]=13 , UpperCAmelCase__ : Optional[int]=7 , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : List[Any]=False , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : Optional[int]=99 , UpperCAmelCase__ : List[Any]=32 , UpperCAmelCase__ : Tuple=5 , UpperCAmelCase__ : Optional[int]=4 , UpperCAmelCase__ : Dict=37 , UpperCAmelCase__ : Any="gelu" , UpperCAmelCase__ : Tuple=0.1 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Union[str, Any]=512 , UpperCAmelCase__ : Tuple=16 , UpperCAmelCase__ : Tuple=2 , UpperCAmelCase__ : Dict=0.02 , UpperCAmelCase__ : Optional[int]=3 , UpperCAmelCase__ : Tuple=4 , UpperCAmelCase__ : Tuple=None , ):
        """Record the tiny-model hyper-parameters used to build test configs/inputs."""
        snake_case : Tuple = parent
        snake_case : Tuple = batch_size
        snake_case : int = seq_length
        snake_case : str = is_training
        snake_case : Any = use_input_mask
        snake_case : Any = use_token_type_ids
        snake_case : str = use_labels
        snake_case : str = vocab_size
        snake_case : Dict = hidden_size
        snake_case : Any = num_hidden_layers
        snake_case : Optional[int] = num_attention_heads
        snake_case : Optional[int] = intermediate_size
        snake_case : Dict = hidden_act
        snake_case : Optional[Any] = hidden_dropout_prob
        snake_case : Tuple = attention_probs_dropout_prob
        snake_case : str = max_position_embeddings
        snake_case : List[str] = type_vocab_size
        snake_case : List[str] = type_sequence_label_size
        snake_case : Any = initializer_range
        snake_case : Optional[int] = num_labels
        snake_case : str = num_choices
        snake_case : Dict = scope

    def lowerCAmelCase( self : Union[str, Any] ):
        """Create random input_ids, optional masks/token-type ids and labels, plus a
        config (originally ``prepare_config_and_inputs``)."""
        snake_case : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        snake_case : List[str] = None
        if self.use_input_mask:
            snake_case : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
        snake_case : List[Any] = None
        if self.use_token_type_ids:
            snake_case : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        snake_case : List[Any] = None
        snake_case : Optional[Any] = None
        snake_case : Any = None
        if self.use_labels:
            snake_case : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            snake_case : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            snake_case : int = ids_tensor([self.batch_size] , self.num_choices )
        snake_case : Any = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def lowerCAmelCase( self : Union[str, Any] ):
        """Return a tiny LlamaConfig built from the stored hyper-parameters
        (originally ``get_config``)."""
        return LlamaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , )

    def lowerCAmelCase( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str ):
        """Forward LlamaModel with and without a mask; check the last_hidden_state
        shape (originally ``create_and_check_model``)."""
        snake_case : Union[str, Any] = LlamaModel(config=UpperCAmelCase__ )
        model.to(UpperCAmelCase__ )
        model.eval()
        snake_case : Optional[int] = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
        snake_case : Optional[int] = model(UpperCAmelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def lowerCAmelCase( self : Tuple , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int , ):
        """Run LlamaModel as a cross-attention decoder (with/without encoder states)
        and check the output shape (originally ``create_and_check_model_as_decoder``)."""
        snake_case : Optional[int] = True
        snake_case : List[Any] = LlamaModel(UpperCAmelCase__ )
        model.to(UpperCAmelCase__ )
        model.eval()
        snake_case : Optional[Any] = model(
            UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , )
        snake_case : Optional[int] = model(
            UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , )
        snake_case : List[str] = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def lowerCAmelCase( self : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[int] , ):
        """Run LlamaForCausalLM with labels and check the logits shape
        (originally ``create_and_check_for_causal_lm``)."""
        snake_case : List[str] = LlamaForCausalLM(config=UpperCAmelCase__ )
        model.to(UpperCAmelCase__ )
        model.eval()
        snake_case : Any = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def lowerCAmelCase( self : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Tuple , ):
        """Check that decoding with cached ``past_key_values`` matches a full forward
        pass on a random logit slice (originally
        ``create_and_check_decoder_model_past_large_inputs``)."""
        snake_case : str = True
        snake_case : Optional[int] = True
        snake_case : Optional[int] = LlamaForCausalLM(config=UpperCAmelCase__ )
        model.to(UpperCAmelCase__ )
        model.eval()
        # first forward pass
        snake_case : str = model(
            UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , use_cache=UpperCAmelCase__ , )
        snake_case : int = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        snake_case : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size )
        snake_case : List[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        snake_case : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
        snake_case : List[Any] = torch.cat([input_mask, next_mask] , dim=-1 )
        snake_case : List[Any] = model(
            UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )['''hidden_states'''][0]
        snake_case : int = model(
            UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )['''hidden_states'''][0]
        # select random slice
        snake_case : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        snake_case : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach()
        snake_case : int = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-3 ) )

    def lowerCAmelCase( self : Tuple ):
        """Return (config, inputs_dict) for the shared ModelTesterMixin tests
        (originally ``prepare_config_and_inputs_for_common``).

        NOTE(review): the annotated tuple-unpack below (``(...) : Tuple = ...``) is
        not valid Python; it is another artifact of the automated rewrite.
        """
        snake_case : Any = self.prepare_config_and_inputs()
        (
            (
                snake_case
            ) , (
                snake_case
            ) , (
                snake_case
            ) , (
                snake_case
            ) , (
                snake_case
            ) , (
                snake_case
            ) , (
                snake_case
            ) ,
        ) : Tuple = config_and_inputs
        snake_case : Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class a_ ( a , a , a , unittest.TestCase ):
    """Common test-suite for the Llama models (originally ``LlamaModelTest``, mixing
    the shared model/generation/pipeline testers with ``unittest.TestCase``).

    NOTE(review): automated renaming mangled this class — all five class attributes
    below share the name ``A__`` so each assignment overwrites the previous one
    (originally ``all_model_classes``, ``all_generative_model_classes``,
    ``pipeline_model_mapping``, ``test_headmasking``, ``test_pruning``); ``setUp``
    references ``LlamaModelTester`` which no longer exists under that name (the
    tester class above is now ``a_``); and several bodies assign results to a
    throwaway ``snake_case`` local. Restore the original identifiers before running.
    """

    A__ : Optional[Any] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    A__ : Any = (LlamaForCausalLM,) if is_torch_available() else ()
    A__ : Union[str, Any] = (
        {
            'feature-extraction': LlamaModel,
            'text-classification': LlamaForSequenceClassification,
            'text-generation': LlamaForCausalLM,
            'zero-shot': LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    A__ : List[str] = False
    A__ : Tuple = False

    def lowerCAmelCase( self : Optional[Any] ):
        """Create the model tester and the config tester (originally ``setUp``)."""
        snake_case : Dict = LlamaModelTester(self )
        snake_case : Optional[int] = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37 )

    def lowerCAmelCase( self : int ):
        """Run the shared LlamaConfig sanity checks (originally ``test_config``)."""
        self.config_tester.run_common_tests()

    def lowerCAmelCase( self : Optional[int] ):
        """Shape-check a plain LlamaModel forward (originally ``test_model``)."""
        snake_case : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCAmelCase__ )

    def lowerCAmelCase( self : str ):
        """Re-run the model check for each position-embedding type
        (originally ``test_model_various_embeddings``)."""
        snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
        # NOTE: ``type`` shadows the builtin here; kept as in the original.
        for type in ["absolute", "relative_key", "relative_key_query"]:
            snake_case : Union[str, Any] = type
            self.model_tester.create_and_check_model(*UpperCAmelCase__ )

    def lowerCAmelCase( self : Optional[int] ):
        """Check LlamaForSequenceClassification logits shape with regression-style
        labels (originally ``test_llama_sequence_classification_model``)."""
        snake_case , snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case : List[str] = 3
        snake_case : Dict = input_dict['''input_ids''']
        snake_case : int = input_ids.ne(1 ).to(UpperCAmelCase__ )
        snake_case : List[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        snake_case : Dict = LlamaForSequenceClassification(UpperCAmelCase__ )
        model.to(UpperCAmelCase__ )
        model.eval()
        snake_case : Union[str, Any] = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def lowerCAmelCase( self : Optional[Any] ):
        """Same check with problem_type='single_label_classification'."""
        snake_case , snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case : List[Any] = 3
        snake_case : Any = '''single_label_classification'''
        snake_case : Optional[int] = input_dict['''input_ids''']
        snake_case : Dict = input_ids.ne(1 ).to(UpperCAmelCase__ )
        snake_case : int = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        snake_case : int = LlamaForSequenceClassification(UpperCAmelCase__ )
        model.to(UpperCAmelCase__ )
        model.eval()
        snake_case : str = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def lowerCAmelCase( self : Tuple ):
        """Same check with problem_type='multi_label_classification' (float labels)."""
        snake_case , snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case : Tuple = 3
        snake_case : Dict = '''multi_label_classification'''
        snake_case : Optional[int] = input_dict['''input_ids''']
        snake_case : Optional[Any] = input_ids.ne(1 ).to(UpperCAmelCase__ )
        snake_case : Tuple = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        snake_case : Dict = LlamaForSequenceClassification(UpperCAmelCase__ )
        model.to(UpperCAmelCase__ )
        model.eval()
        snake_case : Union[str, Any] = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    @unittest.skip('''LLaMA buffers include complex numbers, which breaks this test''' )
    def lowerCAmelCase( self : Optional[int] ):
        """Deliberately skipped shared test (originally ``test_save_load_fast_init_from_base``)."""
        pass

    @parameterized.expand([('''linear''',), ('''dynamic''',)] )
    def lowerCAmelCase( self : Any , UpperCAmelCase__ : Optional[int] ):
        """Compare outputs of an unscaled model vs a RoPE-scaled model on short and
        long inputs (originally ``test_model_rope_scaling``): 'dynamic' scaling must
        match on short inputs, 'linear' must differ, and both must differ on inputs
        longer than max_position_embeddings."""
        snake_case , snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case : List[str] = ids_tensor([1, 10] , config.vocab_size )
        snake_case : Tuple = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        snake_case : Dict = LlamaModel(UpperCAmelCase__ )
        original_model.to(UpperCAmelCase__ )
        original_model.eval()
        snake_case : Dict = original_model(UpperCAmelCase__ ).last_hidden_state
        snake_case : Union[str, Any] = original_model(UpperCAmelCase__ ).last_hidden_state
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        snake_case : List[Any] = {'''type''': scaling_type, '''factor''': 10.0}
        snake_case : int = LlamaModel(UpperCAmelCase__ )
        scaled_model.to(UpperCAmelCase__ )
        scaled_model.eval()
        snake_case : Optional[Any] = scaled_model(UpperCAmelCase__ ).last_hidden_state
        snake_case : List[Any] = scaled_model(UpperCAmelCase__ ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-5 ) )
        else:
            self.assertFalse(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-5 ) )
@require_torch
class a_ ( unittest.TestCase ):
    """Slow integration tests pinning logits/generations of the released Llama-2
    checkpoints (originally ``LlamaIntegrationTest``). All are currently skipped.

    NOTE(review): as elsewhere in this file, results are assigned to throwaway
    ``snake_case`` locals while later lines read the original names (``model``,
    ``out``, ``EXPECTED_MEAN`` ...); restore the identifiers before un-skipping.
    """

    @unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' )
    @slow
    def lowerCAmelCase( self : int ):
        """Logit regression for Llama-2-7b-hf: mean over the last dim and a 30-value slice."""
        snake_case : Optional[int] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
        snake_case : int = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-7b-hf''' , device_map='''auto''' )
        snake_case : List[str] = model(torch.tensor([input_ids] ) )
        # Expected mean on dim = -1
        snake_case : Optional[Any] = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
        torch.testing.assert_close(out.mean(-1 ) , UpperCAmelCase__ , atol=1e-2 , rtol=1e-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        snake_case : List[Any] = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] , UpperCAmelCase__ , atol=1e-5 , rtol=1e-5 )

    @unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' )
    @slow
    def lowerCAmelCase( self : List[Any] ):
        """Logit regression for Llama-2-13b-hf."""
        snake_case : Optional[Any] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
        snake_case : List[str] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-hf''' , device_map='''auto''' )
        snake_case : Any = model(torch.tensor(UpperCAmelCase__ ) )
        # Expected mean on dim = -1
        snake_case : Dict = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
        torch.testing.assert_close(out.mean(-1 ) , UpperCAmelCase__ , atol=1e-2 , rtol=1e-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        snake_case : List[Any] = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] , UpperCAmelCase__ , atol=1e-5 , rtol=1e-5 )

    @unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' )
    @slow
    def lowerCAmelCase( self : Optional[Any] ):
        """Logit regression for Llama-2-13b-chat-hf.

        NOTE(review): despite the 'slicing logits[0, 0, 0:30]' comment, the final
        assertion below compares ``out.mean(-1)`` again instead of the slice — this
        looks like a copy/paste mistake inherited from the original test.
        """
        snake_case : Optional[Any] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
        snake_case : List[str] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' , device_map='''auto''' )
        snake_case : str = model(torch.tensor(UpperCAmelCase__ ) )
        # Expected mean on dim = -1
        snake_case : Optional[int] = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
        torch.testing.assert_close(out.mean(-1 ) , UpperCAmelCase__ , atol=1e-2 , rtol=1e-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        snake_case : Optional[Any] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
        # fmt: on
        torch.testing.assert_close(out.mean(-1 ) , UpperCAmelCase__ , atol=1e-2 , rtol=1e-2 )

    @unittest.skip(
        '''Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test''' )
    @slow
    def lowerCAmelCase( self : Dict ):
        """Logit regression for Llama-2-70b-hf."""
        snake_case : List[Any] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
        snake_case : List[Any] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-70b-hf''' , device_map='''auto''' )
        snake_case : Tuple = model(torch.tensor(UpperCAmelCase__ ) )
        snake_case : Union[str, Any] = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa )
        torch.testing.assert_close(out.mean(-1 ) , UpperCAmelCase__ , atol=1e-2 , rtol=1e-2 )
        # fmt: off
        snake_case : Optional[int] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] , UpperCAmelCase__ , atol=1e-5 , rtol=1e-5 )

    @unittest.skip('''Model is curently gated''' )
    @slow
    def lowerCAmelCase( self : Optional[int] ):
        """Greedy-generation regression for Llama-2-13b-chat-hf against a fixed completion."""
        snake_case : Optional[int] = '''Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'''
        snake_case : List[Any] = '''Simply put, the theory of relativity states that '''
        snake_case : int = LlamaTokenizer.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' )
        snake_case : List[str] = tokenizer.encode(UpperCAmelCase__ , return_tensors='''pt''' )
        snake_case : List[Any] = LlamaForCausalLM.from_pretrained(
            '''meta-llama/Llama-2-13b-chat-hf''' , device_map='''sequential''' , use_safetensors=UpperCAmelCase__ )
        # greedy generation outputs
        snake_case : str = model.generate(UpperCAmelCase__ , max_new_tokens=64 , top_p=UpperCAmelCase__ , temperature=1 , do_sample=UpperCAmelCase__ )
        snake_case : List[str] = tokenizer.decode(generated_ids[0] , skip_special_tokens=UpperCAmelCase__ )
        self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
| 84 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class a_ :
    """Builds tiny random ASTConfig/spectrogram inputs and shape checks for the AST
    (Audio Spectrogram Transformer) tests below (originally ``ASTModelTester``).

    NOTE(review): automated renaming mangled this class — multi-parameter methods
    repeat the parameter name ``UpperCAmelCase__`` (a duplicate-argument SyntaxError)
    and locals are assigned to ``snake_case`` while later lines read the original
    names. Restore the original identifiers before running.
    """

    def __init__( self : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any]=13 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : List[Any]=24 , UpperCAmelCase__ : Union[str, Any]=16 , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : str=True , UpperCAmelCase__ : int=32 , UpperCAmelCase__ : Tuple=5 , UpperCAmelCase__ : Dict=4 , UpperCAmelCase__ : Optional[int]=37 , UpperCAmelCase__ : Optional[int]="gelu" , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Optional[int]=10 , UpperCAmelCase__ : Any=0.02 , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : List[Any]=2 , UpperCAmelCase__ : Optional[Any]=2 , ):
        """Record the tiny-model hyper-parameters and derive the patch sequence length."""
        snake_case : Tuple = parent
        snake_case : Dict = batch_size
        snake_case : str = patch_size
        snake_case : Union[str, Any] = max_length
        snake_case : str = num_mel_bins
        snake_case : Any = is_training
        snake_case : Union[str, Any] = use_labels
        snake_case : Tuple = hidden_size
        snake_case : Dict = num_hidden_layers
        snake_case : Any = num_attention_heads
        snake_case : Any = intermediate_size
        snake_case : List[Any] = hidden_act
        snake_case : str = hidden_dropout_prob
        snake_case : str = attention_probs_dropout_prob
        snake_case : str = type_sequence_label_size
        snake_case : Optional[int] = initializer_range
        snake_case : str = scope
        snake_case : int = frequency_stride
        snake_case : Union[str, Any] = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        snake_case : Optional[int] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        snake_case : Any = (self.max_length - self.patch_size) // self.time_stride + 1
        snake_case : Union[str, Any] = frequency_out_dimension * time_out_dimension
        snake_case : Union[str, Any] = num_patches + 2

    def lowerCAmelCase( self : Union[str, Any] ):
        """Create random spectrogram input_values and optional labels, plus a config
        (originally ``prepare_config_and_inputs``)."""
        snake_case : Optional[int] = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
        snake_case : str = None
        if self.use_labels:
            snake_case : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        snake_case : List[str] = self.get_config()
        return config, input_values, labels

    def lowerCAmelCase( self : Any ):
        """Return a tiny ASTConfig built from the stored hyper-parameters
        (originally ``get_config``)."""
        return ASTConfig(
            patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )

    def lowerCAmelCase( self : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int] ):
        """Forward ASTModel and check the last_hidden_state shape
        (originally ``create_and_check_model``)."""
        snake_case : str = ASTModel(config=UpperCAmelCase__ )
        model.to(UpperCAmelCase__ )
        model.eval()
        snake_case : Any = model(UpperCAmelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def lowerCAmelCase( self : Optional[Any] ):
        """Return (config, inputs_dict) for the shared ModelTesterMixin tests
        (originally ``prepare_config_and_inputs_for_common``).

        NOTE(review): the annotated tuple-unpack below (``(...) : int = ...``) is
        not valid Python; another artifact of the automated rewrite.
        """
        snake_case : int = self.prepare_config_and_inputs()
        (
            (
                snake_case
            ) , (
                snake_case
            ) , (
                snake_case
            ) ,
        ) : int = config_and_inputs
        snake_case : Tuple = {'''input_values''': input_values}
        return config, inputs_dict
@require_torch
class a_ ( a , a , unittest.TestCase ):
    """Common test-suite for AST (originally ``ASTModelTest``): shared model and
    pipeline tests over ASTModel / ASTForAudioClassification.

    NOTE(review): automated renaming mangled this class — all six class attributes
    below are named ``A__`` so each assignment overwrites the previous (originally
    ``all_model_classes``, ``pipeline_model_mapping``, ``fx_compatible``,
    ``test_pruning``, ``test_resize_embeddings``, ``test_head_masking``); the
    pipeline-skip method reads ``pipeline_test_casse_name`` which is never bound;
    ``setUp`` references ``ASTModelTester`` (now ``a_`` above). Restore the original
    identifiers before running.
    """

    A__ : List[Any] = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    A__ : int = (
        {'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel}
        if is_torch_available()
        else {}
    )
    A__ : Optional[Any] = False
    A__ : Dict = False
    A__ : int = False
    A__ : Optional[int] = False

    def lowerCAmelCase( self : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int ):
        """Skip the audio-classification pipeline tests (originally
        ``is_pipeline_test_to_skip``)."""
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False

    def lowerCAmelCase( self : Optional[Any] ):
        """Create the model tester and config tester (originally ``setUp``)."""
        snake_case : Optional[int] = ASTModelTester(self )
        snake_case : Optional[int] = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37 )

    def lowerCAmelCase( self : List[str] ):
        """Run the shared ASTConfig sanity checks (originally ``test_config``)."""
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''AST does not use inputs_embeds''' )
    def lowerCAmelCase( self : Tuple ):
        """Deliberately skipped: AST has no inputs_embeds."""
        pass

    def lowerCAmelCase( self : Dict ):
        """Check the input/output embedding accessors return sensible module types
        (originally ``test_model_common_attributes``)."""
        snake_case , snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case : Optional[Any] = model_class(UpperCAmelCase__ )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            snake_case : Any = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(UpperCAmelCase__ , nn.Linear ) )

    def lowerCAmelCase( self : Dict ):
        """Check that each model's forward signature starts with ``input_values``
        (originally ``test_forward_signature``)."""
        snake_case , snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case : Any = model_class(UpperCAmelCase__ )
            snake_case : Any = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            snake_case : str = [*signature.parameters.keys()]
            snake_case : List[str] = ['''input_values''']
            self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )

    def lowerCAmelCase( self : Dict ):
        """Shape-check a plain ASTModel forward (originally ``test_model``)."""
        snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCAmelCase__ )

    @slow
    def lowerCAmelCase( self : List[str] ):
        """Smoke-test loading the first pretrained AST checkpoint
        (originally ``test_model_from_pretrained``)."""
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            snake_case : List[str] = ASTModel.from_pretrained(UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
def a_ ( ):
    """Download the sample FLAC file from the Hub and load it with torchaudio.

    Fixes the mangled original, which downloaded the file into a throwaway local and
    then called ``torchaudio.load(__magic_name__)`` on an undefined name (the function
    takes no arguments), guaranteeing a NameError at runtime.

    Returns:
        Tuple ``(audio, sampling_rate)`` as produced by ``torchaudio.load``:
        the waveform tensor and its sample rate.
    """
    filepath = hf_hub_download(
        repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' )
    # pass the downloaded path to torchaudio.load (the actual fix)
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate
@require_torch
@require_torchaudio
class a_ ( unittest.TestCase ):
    """Slow integration test for AST audio classification on a real checkpoint
    (originally ``ASTModelIntegrationTest``).

    NOTE(review): results below are assigned to throwaway ``snake_case`` locals while
    later lines read the original names (``model``, ``inputs``, ``outputs`` ...);
    also note the feature extractor is fetched twice in a row. Restore the original
    identifiers before running.
    """

    @cached_property
    def lowerCAmelCase( self : Any ):
        """Lazily build the pretrained ASTFeatureExtractor (None when torchaudio is
        unavailable) — originally ``default_feature_extractor``."""
        return (
            ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' )
            if is_torchaudio_available()
            else None
        )

    @slow
    def lowerCAmelCase( self : Tuple ):
        """Run the AudioSet-finetuned AST on the sample clip and pin the logits shape
        and first three values (originally ``test_inference_audio_classification``)."""
        snake_case : List[str] = self.default_feature_extractor
        snake_case : str = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(UpperCAmelCase__ )
        snake_case : str = self.default_feature_extractor
        snake_case , snake_case : int = prepare_audio()
        snake_case : Optional[int] = audio.squeeze().numpy()
        snake_case : Optional[Any] = feature_extractor(UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , return_tensors='''pt''' ).to(UpperCAmelCase__ )
        # forward pass
        with torch.no_grad():
            snake_case : Union[str, Any] = model(**UpperCAmelCase__ )
        # verify the logits
        snake_case : Any = torch.Size((1, 527) )
        self.assertEqual(outputs.logits.shape , UpperCAmelCase__ )
        snake_case : str = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(UpperCAmelCase__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4 ) )
| 84 | 1 |
from timeit import timeit
def a_ ( __magic_name__ ) -> int:
    """Count set bits using Brian Kernighan's algorithm.

    Each iteration of ``n &= n - 1`` clears the lowest set bit, so the loop runs
    once per set bit. Raises ValueError for negative input.
    """
    if __magic_name__ < 0:
        raise ValueError('''the value of input must not be negative''' )
    # BUGFIX(review): the original body read an undefined name ``number`` instead of
    # its parameter, raising UnboundLocalError on every call.
    number = __magic_name__
    result = 0
    while number:
        number &= number - 1  # drop the lowest set bit
        result += 1
    return result
def a_ ( __magic_name__ ) -> int:
    """Count set bits by testing the low bit and shifting right.

    Raises ValueError for negative input.
    """
    if __magic_name__ < 0:
        raise ValueError('''the value of input must not be negative''' )
    # BUGFIX(review): the original body read an undefined name ``number`` instead of
    # its parameter, raising UnboundLocalError on every call.
    number = __magic_name__
    result = 0
    while number:
        if number % 2 == 1:  # low bit set?
            result += 1
        number >>= 1
    return result
def a_ ( ) -> None:
    """Benchmark the two popcount implementations with ``timeit`` on a few inputs."""
    def do_benchmark(__magic_name__ ) -> None:
        # Setup string makes the module's functions visible to timeit as ``z.*``.
        snake_case : Tuple = '''import __main__ as z'''
        # NOTE(review): ``number``, ``setup``, ``timing`` and the two long function
        # names referenced below are unresolved in this file (the popcount functions
        # are both named ``a_`` after obfuscation), so this benchmark raises
        # NameError as written — verify against the upstream script.
        print(F"Benchmark when {number = }:" )
        print(F"{get_set_bits_count_using_modulo_operator(__magic_name__ ) = }" )
        snake_case : Tuple = timeit('''z.get_set_bits_count_using_modulo_operator(25)''' , setup=__magic_name__ )
        print(F"timeit() runs in {timing} seconds" )
        print(F"{get_set_bits_count_using_brian_kernighans_algorithm(__magic_name__ ) = }" )
        snake_case : Union[str, Any] = timeit(
            '''z.get_set_bits_count_using_brian_kernighans_algorithm(25)''' , setup=__magic_name__ , )
        print(F"timeit() runs in {timing} seconds" )
    for number in (25, 37, 58, 0):
        # NOTE(review): ``__magic_name__`` is unresolved here; presumably ``number``.
        do_benchmark(__magic_name__ )
        print()
if __name__ == "__main__":
    # Run doctests, then the timing comparison.
    import doctest

    doctest.testmod()
    # NOTE(review): ``benchmark`` is unresolved in this file (the benchmark function
    # is named ``a_`` after obfuscation) — confirm against the upstream script.
    benchmark()
| 84 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_a : Union[str, Any] = logging.getLogger(__name__)
def a_ ( preds , labels ) -> float:
    """Element-wise accuracy: fraction of positions where ``preds`` equals ``labels``.

    Both arguments are expected to be numpy arrays of equal shape.
    """
    # BUGFIX(review): the original signature declared the same parameter name twice
    # (a SyntaxError); renamed to the names the body reads.
    return (preds == labels).mean()
@dataclass
class a_ :
    """CLI arguments selecting the pretrained model, config, tokenizer and cache dir."""

    # Required: model checkpoint path or hub identifier.
    A__ : str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    # NOTE(review): ``default=a`` references a bare undefined name ``a`` on the three
    # fields below (presumably ``None`` before obfuscation) — confirm upstream.
    A__ : Optional[str] = field(
        default=a , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    A__ : Optional[str] = field(
        default=a , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    A__ : Optional[str] = field(
        default=a , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class a_ :
    """CLI arguments describing the multiple-choice task, data dir and preprocessing."""

    # Required: which registered processor/task to use.
    A__ : str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys() )} )
    A__ : str = field(metadata={'help': 'Should contain the data files for the task.'} )
    A__ : int = field(
        default=128 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    # NOTE(review): ``default=a`` references a bare undefined name ``a`` (presumably
    # ``False`` before obfuscation) — confirm upstream.
    A__ : bool = field(
        default=a , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def a_ ( ) -> Dict:
    """Entry point: parse args, build model/tokenizer/datasets, then train and evaluate.

    NOTE(review): throughout this function results are bound to a throwaway
    ``snake_case`` local while later code reads the intended names (``parser``,
    ``training_args``, ``processor``, ``tokenizer`` ...) — obfuscation artifacts;
    verify each against the upstream run_multiple_choice example.
    """
    snake_case : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    snake_case , snake_case , snake_case : Tuple = parser.parse_args_into_dataclasses()
    # Refuse to clobber an existing, non-empty output directory unless asked to.
    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            ''' --overwrite_output_dir to overcome.''' )
    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''' , __magic_name__ )
    # Set seed
    set_seed(training_args.seed )
    # Resolve the task's processor and label list; unknown tasks are a user error.
    try:
        snake_case : int = processors[data_args.task_name]()
        snake_case : List[str] = processor.get_labels()
        snake_case : str = len(__magic_name__ )
    except KeyError:
        raise ValueError('''Task not found: %s''' % (data_args.task_name) )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    snake_case : List[Any] = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__magic_name__ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    snake_case : str = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    snake_case : Any = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__magic_name__ , cache_dir=model_args.cache_dir , )
    # Get datasets
    snake_case : Optional[int] = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=__magic_name__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    snake_case : Any = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=__magic_name__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    # Accuracy over the argmax of the choice logits.
    def compute_metrics(__magic_name__ ) -> Dict:
        snake_case : str = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(__magic_name__ , p.label_ids )}
    # Data collator
    snake_case : Dict = DataCollatorWithPadding(__magic_name__ , pad_to_multiple_of=8 ) if training_args.fpaa else None
    # Initialize our Trainer
    snake_case : List[Any] = Trainer(
        model=__magic_name__ , args=__magic_name__ , train_dataset=__magic_name__ , eval_dataset=__magic_name__ , compute_metrics=__magic_name__ , data_collator=__magic_name__ , )
    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    snake_case : Optional[int] = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        snake_case : Optional[Any] = trainer.evaluate()
        snake_case : Union[str, Any] = os.path.join(training_args.output_dir , '''eval_results.txt''' )
        if trainer.is_world_master():
            with open(__magic_name__ , '''w''' ) as writer:
                logger.info('''***** Eval results *****''' )
                for key, value in result.items():
                    logger.info(''' %s = %s''' , __magic_name__ , __magic_name__ )
                    writer.write('''%s = %s\n''' % (key, value) )
                results.update(__magic_name__ )
    return results
def a_ ( __magic_name__ ) -> List[Any]:
    """xla_spawn entry point: ignores its process-index argument and runs main."""
    # NOTE(review): ``main`` is not defined in this file (the entry point above is
    # named ``a_`` after obfuscation) — confirm against the upstream example.
    main()
if __name__ == "__main__":
    main()
| 84 | 1 |
import argparse
import datetime
def a_ ( __magic_name__ ) -> str:
    """Return the weekday of a ``mm-dd-yyyy`` / ``mm/dd/yyyy`` date via Zeller's congruence.

    Validates the input string field by field, cross-checks the computed weekday
    against ``datetime.date.weekday`` and raises ValueError on malformed input.
    """
    days = {
        '''0''': '''Sunday''',
        '''1''': '''Monday''',
        '''2''': '''Tuesday''',
        '''3''': '''Wednesday''',
        '''4''': '''Thursday''',
        '''5''': '''Friday''',
        '''6''': '''Saturday''',
    }
    # Map datetime.weekday() (Mon=0) onto Zeller's numbering (Sun=0).
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(__magic_name__ ) < 11:
        raise ValueError('''Must be 10 characters long''' )
    # Get month
    m = int(__magic_name__[0] + __magic_name__[1] )
    # Validate
    if not 0 < m < 13:
        raise ValueError('''Month must be between 1 - 12''' )
    sep_a = __magic_name__[2]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError('''Date separator must be \'-\' or \'/\'''' )
    # Get day
    d = int(__magic_name__[3] + __magic_name__[4] )
    # Validate
    if not 0 < d < 32:
        raise ValueError('''Date must be between 1 - 31''' )
    # Get second separator
    sep_a = __magic_name__[5]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError('''Date separator must be \'-\' or \'/\'''' )
    # Get year
    y = int(__magic_name__[6] + __magic_name__[7] + __magic_name__[8] + __magic_name__[9] )
    # Arbitrary year range
    if not 45 < y < 8_500:
        raise ValueError(
            '''Year out of range. There has to be some sort of limit...right?''' )
    # Get datetime obj for validation.
    # BUGFIX(review): the original called datetime.date(int(date), int(date), int(date))
    # on the whole input string, which always raised ValueError; use (y, m, d).
    dt_ck = datetime.date(y , m , d )
    # Zeller's congruence treats Jan/Feb as months 13/14 of the previous year.
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths vars: century and year-of-century
    c = int(str(y )[:2] )
    k = int(str(y )[2:] )
    t = int(2.6 * m - 5.39 )
    u = int(c / 4 )
    v = int(k / 4 )
    x = int(d + k )
    z = int(t + u + v + x )
    w = int(z - (2 * c) )
    f = round(w % 7 )
    # Validate math against the standard-library calendar.
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError('''The date was evaluated incorrectly. Contact developer.''' )
    # BUGFIX(review): the original indexed ``days`` with the whole input string
    # (always a KeyError); index with the computed weekday instead.
    return F"Your date {__magic_name__}, is a {days[str(f )]}!"
if __name__ == "__main__":
    # Run doctests, then the CLI: one positional date string argument.
    import doctest

    doctest.testmod()
    _a : Tuple = argparse.ArgumentParser(
        description=(
            'Find out what day of the week nearly any date is or was. Enter '
            'date as a string in the mm-dd-yyyy or mm/dd/yyyy format'
        )
    )
    parser.add_argument(
        'date_input', type=str, help='Date as a string (mm-dd-yyyy or mm/dd/yyyy)'
    )
    _a : List[Any] = parser.parse_args()
    # NOTE(review): ``parser``, ``args`` and ``zeller`` are unresolved here (the
    # parser/args are bound to ``_a`` and the function is named ``a_`` after
    # obfuscation) — confirm against the upstream script.
    zeller(args.date_input)
| 84 |
import re
def a_ ( __magic_name__ ) -> bool:
    """Return True when the input string is a valid Sri Lankan mobile phone number.

    Accepted prefixes: ``0``, ``94``, ``+94`` or ``0094``, followed by ``7``, one
    carrier digit (0-2, 4-8), an optional ``-``/space separator, and seven digits.
    """
    phone_pattern = re.compile(
        R'''^(?:0|94|\+94|0{2}94)''' R'''7(0|1|2|4|5|6|7|8)''' R'''(-| |)''' R'''\d{7}$''' )
    # BUGFIX(review): the original called re.search(input, input), matching the
    # string against itself and ignoring the compiled pattern entirely.
    return bool(phone_pattern.search(__magic_name__ ) )
if __name__ == "__main__":
    # Demo: validate a sample number and print the result.
    _a : Any = '0094702343221'
    # NOTE(review): ``is_sri_lankan_phone_number`` and ``phone`` are unresolved here
    # (the function is named ``a_`` and the sample is bound to ``_a`` after
    # obfuscation) — confirm against the upstream script.
    print(is_sri_lankan_phone_number(phone))
| 84 | 1 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available

# Lazy-import table for the MLuke tokenizer; empty when sentencepiece is missing.
_a : Optional[int] = {}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): this is presumably the ``_import_structure`` mapping consumed by
    # _LazyModule below; after obfuscation both the dict and this list are bound to
    # ``_a``, so ``_import_structure`` is unresolved — confirm upstream.
    _a : List[str] = ['MLukeTokenizer']
if TYPE_CHECKING:
    # Direct import for type checkers only.
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer
else:
    # At runtime, replace this module with a lazy proxy.
    import sys

    _a : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 84 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class a_ ( unittest.TestCase ):
    """Holds image-processor settings for the LayoutLMv3 tests and builds the kwargs dict."""

    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , apply_ocr=True , ):
        # BUGFIX(review): the original signature repeated a single parameter name for
        # every argument (a SyntaxError); names restored from the attributes below.
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        # Default to an 18x18 target size when none is supplied.
        self.size = size if size is not None else {'''height''': 18, '''width''': 18}
        self.apply_ocr = apply_ocr

    def lowerCAmelCase( self : int ):
        """Return the kwargs dict used to instantiate the image processor under test."""
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class a_ ( a , unittest.TestCase ):
    """Tests for the LayoutLMv3 image processor (resize + optional Tesseract OCR)."""
    # Image-processor class under test; None when pytesseract is unavailable.
    A__ : List[Any] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
    def lowerCAmelCase( self : Dict ):
        """Set-up hook: build the settings helper for this test class."""
        # NOTE(review): the result is bound to a throwaway local; presumably it was
        # meant to be stored as ``self.image_processor_tester``, and the tester class
        # name is unresolved in this file (obfuscation artifacts) — confirm upstream.
        snake_case : Optional[Any] = LayoutLMvaImageProcessingTester(self )
    @property
    def lowerCAmelCase( self : Dict ):
        """Kwargs dict for building the image processor under test."""
        # NOTE(review): relies on ``self.image_processor_tester`` and
        # ``prepare_image_processor_dict``, neither of which the visible code binds
        # under those names — obfuscation artifact; confirm upstream.
        return self.image_processor_tester.prepare_image_processor_dict()
    def lowerCAmelCase( self : List[Any] ):
        """Check the processor exposes the expected configuration attributes."""
        snake_case : List[str] = self.image_processing_class(**self.image_processor_dict )
        # NOTE(review): ``UpperCAmelCase__`` is unresolved in this scope; the asserts
        # presumably target the processor built above — obfuscation artifact.
        self.assertTrue(hasattr(UpperCAmelCase__ , '''do_resize''' ) )
        self.assertTrue(hasattr(UpperCAmelCase__ , '''size''' ) )
        self.assertTrue(hasattr(UpperCAmelCase__ , '''apply_ocr''' ) )
    def lowerCAmelCase( self : Optional[int] ):
        """Check ``from_dict`` honours the default size and a ``size`` override."""
        # NOTE(review): ``image_processor`` is read below but the results are bound
        # to throwaway ``snake_case`` locals — obfuscation artifact; confirm upstream.
        snake_case : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
        snake_case : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
    def lowerCAmelCase( self : str ):
        """Intentionally empty; presumably disables an inherited mixin test — confirm."""
        pass
    def lowerCAmelCase( self : Dict ):
        """Smoke-test calling the processor on PIL images, unbatched and batched."""
        # Initialize image_processing
        # NOTE(review): throughout this method ``UpperCAmelCase__`` and the locals
        # read below (``image_inputs``, ``image_processing``, ``encoding``,
        # ``encoded_images``) are unresolved because results are bound to throwaway
        # ``snake_case`` locals — obfuscation artifacts; confirm upstream.
        snake_case : Tuple = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        snake_case : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase__ , Image.Image )
        # Test not batched input
        snake_case : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' )
        self.assertEqual(
            encoding.pixel_values.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        self.assertIsInstance(encoding.words , UpperCAmelCase__ )
        self.assertIsInstance(encoding.boxes , UpperCAmelCase__ )
        # Test batched
        snake_case : Dict = image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
    def lowerCAmelCase( self : Union[str, Any] ):
        """Smoke-test calling the processor on numpy arrays, unbatched and batched."""
        # Initialize image_processing
        # NOTE(review): ``UpperCAmelCase__`` and the locals read below are unresolved
        # (results bound to throwaway ``snake_case`` locals) — obfuscation artifacts.
        snake_case : List[str] = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        snake_case : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase__ , np.ndarray )
        # Test not batched input
        snake_case : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        # Test batched
        snake_case : List[Any] = image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
    def lowerCAmelCase( self : Optional[Any] ):
        """Smoke-test calling the processor on PyTorch tensors, unbatched and batched."""
        # Initialize image_processing
        # NOTE(review): ``UpperCAmelCase__`` and the locals read below are unresolved
        # (results bound to throwaway ``snake_case`` locals) — obfuscation artifacts.
        snake_case : str = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        snake_case : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )
        # Test not batched input
        snake_case : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        # Test batched
        snake_case : Tuple = image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
def lowerCAmelCase( self : Optional[Any] ):
"""simple docstring"""
# with apply_OCR = True
snake_case : int = LayoutLMvaImageProcessor()
from datasets import load_dataset
snake_case : List[Any] = load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' )
snake_case : List[Any] = Image.open(ds[0]['''file'''] ).convert('''RGB''' )
snake_case : Any = image_processing(UpperCAmelCase__ , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
snake_case : Optional[Any] = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', 
'''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
snake_case : Union[str, Any] = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 
447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , UpperCAmelCase__ )
self.assertListEqual(encoding.boxes , UpperCAmelCase__ )
# with apply_OCR = False
snake_case : str = LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase__ )
snake_case : Optional[Any] = image_processing(UpperCAmelCase__ , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 84 | 1 |
import argparse

import intel_extension_for_pytorch as ipex
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline

# Stable Diffusion inference script optimized with Intel Extension for PyTorch (IPEX):
# channels-last memory format + ipex.optimize on every sub-model, then bf16 autocast.
_a : List[Any] = argparse.ArgumentParser('Stable Diffusion script with intel optimization', add_help=False)
parser.add_argument('--dpm', action='store_true', help='Enable DPMSolver or not')
parser.add_argument('--steps', default=None, type=int, help='Num inference steps')
# NOTE(review): names such as ``parser``, ``args``, ``pipe``, ``model_id`` are read
# below but everything is bound to ``_a`` — obfuscation artifacts; likewise
# ``torch.bfloataa`` is presumably ``torch.bfloat16`` — confirm upstream.
_a : Dict = parser.parse_args()
_a : Optional[int] = 'cpu'
_a : List[Any] = 'a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'
_a : str = 'path-to-your-trained-model'
_a : Optional[Any] = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    _a : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
_a : Optional[Any] = pipe.to(device)
# to channels last
_a : Optional[Any] = pipe.unet.to(memory_format=torch.channels_last)
_a : Union[str, Any] = pipe.vae.to(memory_format=torch.channels_last)
_a : Union[str, Any] = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    _a : List[str] = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
# Sample inputs let IPEX trace/optimize the UNet graph ahead of time.
_a : List[Any] = torch.randn(2, 4, 64, 64)
_a : List[Any] = torch.rand(1) * 999
_a : Any = torch.randn(2, 77, 768)
_a : int = (sample, timestep, encoder_hidden_status)
try:
    _a : Optional[int] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
    # Fall back to optimizing without a sample input.
    _a : Dict = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
_a : Optional[Any] = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
_a : List[Any] = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
    _a : Dict = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
_a : Optional[Any] = 666
_a : Optional[Any] = torch.Generator(device).manual_seed(seed)
_a : List[Any] = {'generator': generator}
if args.steps is not None:
    _a : Tuple = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
    _a : List[Any] = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('generated.png')
| 84 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_a : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
_a : Dict = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... 
prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def a_ ( height , width , scale_factor=8 ) -> tuple:
    """Round (height, width) up to the nearest multiple of ``scale_factor**2``, then
    divide by ``scale_factor`` — the latent size the VQ decoder upsamples back.

    Returns a ``(new_height, new_width)`` tuple of ints.
    """
    # BUGFIX(review): the original signature repeated one parameter name three times
    # (a SyntaxError) while the body read ``height``/``width``/``scale_factor``;
    # the ``-> str`` annotation was also wrong for a tuple-returning function.
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1  # round up for non-multiples
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class a_ ( a ):
def __init__( self : Optional[int] , UpperCAmelCase__ : UNetaDConditionModel , UpperCAmelCase__ : DDPMScheduler , UpperCAmelCase__ : VQModel , ):
"""simple docstring"""
super().__init__()
self.register_modules(
unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ , movq=UpperCAmelCase__ , )
snake_case : List[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCAmelCase( self : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any ):
"""simple docstring"""
if latents is None:
snake_case : int = randn_tensor(UpperCAmelCase__ , generator=UpperCAmelCase__ , device=UpperCAmelCase__ , dtype=UpperCAmelCase__ )
else:
if latents.shape != shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}" )
snake_case : Optional[Any] = latents.to(UpperCAmelCase__ )
snake_case : List[Any] = latents * scheduler.init_noise_sigma
return latents
    def lowerCAmelCase( self : Dict , UpperCAmelCase__ : Optional[int]=0 ):
        """Sequentially offload the unet and movq to CPU via accelerate's cpu_offload."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('''Please install accelerate via `pip install accelerate`''' )
        # NOTE(review): ``gpu_id``, ``models`` and the second cpu_offload argument are
        # unresolved here (values bound to throwaway ``snake_case`` locals and the
        # parameter obfuscated to ``UpperCAmelCase__``) — confirm against upstream.
        snake_case : Union[str, Any] = torch.device(F"cuda:{gpu_id}" )
        snake_case : Dict = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(UpperCAmelCase__ , UpperCAmelCase__ )
    def lowerCAmelCase( self : List[Any] , UpperCAmelCase__ : Any=0 ):
        """Hook-based full-model CPU offload (accelerate >= 0.17), keeping the last hook."""
        if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
        # NOTE(review): ``gpu_id``, ``hook`` and the arguments passed to
        # cpu_offload_with_hook are unresolved (obfuscation artifacts) — the final
        # hook is presumably meant to be stored as ``self.final_offload_hook``.
        snake_case : Optional[int] = torch.device(F"cuda:{gpu_id}" )
        if self.device.type != "cpu":
            self.to('''cpu''' , silence_dtype_warnings=UpperCAmelCase__ )
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        snake_case : List[str] = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            snake_case , snake_case : Optional[int] = cpu_offload_with_hook(UpperCAmelCase__ , UpperCAmelCase__ , prev_module_hook=UpperCAmelCase__ )
        # We'll offload the last model manually.
        snake_case : Tuple = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def lowerCAmelCase( self : Union[str, Any] ):
        """Device the unet executes on, honouring accelerate hooks when present."""
        if not hasattr(self.unet , '''_hf_hook''' ):
            return self.device
        # Walk the submodules looking for a hook that pins an execution device.
        # NOTE(review): ``UpperCAmelCase__`` below is unresolved; presumably ``module``.
        for module in self.unet.modules():
            if (
                hasattr(UpperCAmelCase__ , '''_hf_hook''' )
                and hasattr(module._hf_hook , '''execution_device''' )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
@torch.no_grad()
# NOTE(review): assumes EXAMPLE_DOC_STRING is defined earlier in this module,
# as in the stock diffusers pipeline — confirm.
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
    self,
    image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
    negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
    hint: torch.FloatTensor,
    height: int = 512,
    width: int = 512,
    num_inference_steps: int = 100,
    guidance_scale: float = 4.0,
    num_images_per_prompt: int = 1,
    generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
    latents: Optional[torch.FloatTensor] = None,
    output_type: Optional[str] = "pil",
    return_dict: bool = True,
):
    """Run the ControlNet-conditioned denoising loop and decode with movq.

    Args:
        image_embeds: image prior embeddings (tensor or list of tensors).
        negative_image_embeds: embeddings for the unconditional branch.
        hint: ControlNet conditioning tensor.
        height, width: output resolution before movq downscaling.
        num_inference_steps: number of scheduler steps.
        guidance_scale: >1.0 enables classifier-free guidance.
        num_images_per_prompt: copies generated per embedding.
        generator: RNG(s) for reproducibility.
        latents: optional initial latents.
        output_type: one of "pt", "np", "pil".
        return_dict: return `ImagePipelineOutput` instead of a tuple.

    Raises:
        ValueError: on an unsupported `output_type`.
    """
    device = self._execution_device
    do_classifier_free_guidance = guidance_scale > 1.0
    # Allow lists of per-prompt embeddings by concatenating along the batch dim.
    if isinstance(image_embeds, list):
        image_embeds = torch.cat(image_embeds, dim=0)
    if isinstance(negative_image_embeds, list):
        negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
    if isinstance(hint, list):
        hint = torch.cat(hint, dim=0)
    batch_size = image_embeds.shape[0] * num_images_per_prompt
    if do_classifier_free_guidance:
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        hint = hint.repeat_interleave(num_images_per_prompt, dim=0)
        # Stack [uncond, cond] so one UNet pass serves both guidance branches.
        image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
        hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)
    self.scheduler.set_timesteps(num_inference_steps, device=device)
    timesteps = self.scheduler.timesteps
    num_channels_latents = self.movq.config.latent_channels
    height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
    # create initial latent
    latents = self.prepare_latents(
        (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler,
    )
    for i, t in enumerate(self.progress_bar(timesteps)):
        # expand the latents if we are doing classifier free guidance
        latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
        added_cond_kwargs = {'''image_embeds''': image_embeds, '''hint''': hint}
        noise_pred = self.unet(
            sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False,
        )[0]
        if do_classifier_free_guidance:
            # UNet predicts noise and variance stacked on the channel dim.
            noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
            noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
            _, variance_pred_text = variance_pred.chunk(2)
            noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
        if not (
            hasattr(self.scheduler.config, '''variance_type''')
            and self.scheduler.config.variance_type in ["learned", "learned_range"]
        ):
            # Drop the predicted variance when the scheduler does not use it.
            noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
        # compute the previous noisy sample x_t -> x_t-1
        latents = self.scheduler.step(
            noise_pred, t, latents, generator=generator,
        )[0]
    # post-processing
    image = self.movq.decode(latents, force_not_quantize=True)['''sample''']
    if output_type not in ["pt", "np", "pil"]:
        raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
    if output_type in ["np", "pil"]:
        # [-1, 1] -> [0, 1], then channels-last numpy.
        image = image * 0.5 + 0.5
        image = image.clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()
    if output_type == "pil":
        image = self.numpy_to_pil(image)
    if not return_dict:
        return (image,)
    return ImagePipelineOutput(images=image)
| 84 | 1 |
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = 'src/transformers'

# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    'transformers',
    os.path.join(PATH_TO_TRANSFORMERS, '__init__.py'),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r'\[(.+?)\]\((https://huggingface\.co/.+?)\)')

# Config classes exempt from the docstring-checkpoint check.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    'CLIPConfigMixin',
    'DecisionTransformerConfigMixin',
    'EncoderDecoderConfigMixin',
    'RagConfigMixin',
    'SpeechEncoderDecoderConfigMixin',
    'VisionEncoderDecoderConfigMixin',
    'VisionTextDualEncoderConfigMixin',
}
def check_config_docstrings_have_checkpoints() -> None:
    """Verify every config class docstring links a real checkpoint.

    A link is valid when the markdown `[name](url)` pair satisfies
    url == https://huggingface.co/<name>.

    Raises:
        ValueError: listing all config classes (outside the ignore set)
            whose docstring has no valid checkpoint link.
    """
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False
        # source code of `config_class`
        source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(source)
        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint
            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = F"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break
        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)
    if len(configs_without_checkpoint) > 0:
        message = '''\n'''.join(sorted(configs_without_checkpoint))
        raise ValueError(F"The following configurations don't contain any valid checkpoint:\n{message}")
# Script entry point: raises ValueError if any config class docstring lacks a
# valid checkpoint link.
if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
| 84 |
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# Local SentencePiece model used to build small test tokenizers.
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')


@require_sentencepiece
@require_tokenizers
class a_(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for ReformerTokenizer / ReformerTokenizerFast."""

    # Attributes consumed by TokenizerTesterMixin.
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        """Save a sample slow tokenizer the mixin can reload from disk."""
        super().setUp()
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Token <-> id conversion round-trips for a known pair."""
        token = '''<s>'''
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        """Vocab ordering and size of the sample model."""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '''<unk>''')
        self.assertEqual(vocab_keys[1], '''<s>''')
        self.assertEqual(vocab_keys[-1], '''j''')
        self.assertEqual(len(vocab_keys), 1_000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_000)

    def test_rust_and_python_full_tokenizers(self):
        """Slow and fast tokenizers must agree on tokens and ids."""
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = '''I was born in 92000, and this is falsé.'''
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_padding(self, max_length=15):
        """Padding to max_length must fail: Reformer has no padding token."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = '''This is a simple input'''
                s2 = ['''This is a simple input 1''', '''This is a simple input 2''']
                p = ('''This is a simple input''', '''This is a pair''')
                p2 = [
                    ('''This is a simple input 1''', '''This is a simple input 2'''),
                    ('''This is a simple pair 1''', '''This is a simple pair 2'''),
                ]
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding='''max_length''')
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding='''max_length''')
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding='''max_length''',
                )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding='''max_length''')
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding='''max_length''')
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding='''max_length''',
                )

    def test_padding_different_model_input_name(self):
        """Skipped: the tokenizer has no padding token."""
        pass

    def test_full_tokenizer(self):
        """Tokenization, ids and back-conversion on the sample vocab."""
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize('''This is a test''')
        self.assertListEqual(tokens, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382],
        )
        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''',
                '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''',
                '''9''', '''2''', '''0''', '''0''', '''0''', ''',''',
                SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''',
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # Out-of-vocab pieces ("9", "é") come back as <unk>.
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''',
                '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''',
                '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''',
                SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''',
            ],
        )

    @cached_property
    def big_tokenizer(self):
        """Full pretrained tokenizer used by the slow integration tests."""
        return ReformerTokenizer.from_pretrained('''google/reformer-crime-and-punishment''')

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = '''Hello World!'''
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
            ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
        )
        original_tokenizer_encodings = [
            108, 265, 24, 111, 4, 258, 156, 35, 28, 275, 3, 259, 297, 260, 84, 4, 35, 110, 44, 8,
            259, 91, 268, 21, 11, 209, 274, 109, 266, 277, 117, 86, 93, 315, 258, 278, 258, 277, 258, 0,
            258, 288, 258, 319, 258, 0, 258, 0, 258, 0, 258, 0, 258, 287, 258, 315, 258, 289, 258, 278,
            99, 269, 266, 262, 8, 259, 241, 4, 217, 230, 268, 266, 55, 168, 106, 75, 193, 266, 223, 27,
            49, 26, 282, 25, 264, 299, 19, 26, 0, 258, 277, 117, 86, 93, 176, 183, 270, 11, 262, 42,
            61, 265,
        ]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        """Encoded batches must be consumable by a ReformerModel forward pass."""
        import torch
        from transformers import ReformerConfig, ReformerModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = ''' '''.join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors='''pt''')
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors='''pt''')
        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence['''input_ids'''].shape
        model = ReformerModel(config)
        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on
        # This tokenizer does not know some characters like ")".
        # That is the reason why we use very simple texts here.
        # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
            '''This is a very simple sentence.''',
            '''The quick brown fox jumps over the lazy dog.''',
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name='''google/reformer-crime-and-punishment''',
            revision='''0e6c3decb8211d49bf881013425dc8b0448b3f5a''',
            padding=False,
            sequences=sequences,
        )
| 84 | 1 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
            f"decoder.layers.{i}.encoder_attn.out_proj.weight",
        )
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
            f"decoder.layers.{i}.encoder_attn.out_proj.bias",
        )
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))

# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
    [
        ('input_proj.weight', 'input_projection.weight'),
        ('input_proj.bias', 'input_projection.bias'),
        ('query_embed.weight', 'query_position_embeddings.weight'),
        ('transformer.encoder.norm.weight', 'encoder.layernorm.weight'),
        ('transformer.encoder.norm.bias', 'encoder.layernorm.bias'),
        ('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
        ('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
        ('class_embed.weight', 'class_labels_classifier.weight'),
        ('class_embed.bias', 'class_labels_classifier.bias'),
        ('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
        ('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
        ('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
        ('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
        ('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
        ('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
    ]
)
def rename_key(state_dict, old, new):
    """Move the entry `old` in `state_dict` to the key `new`, in place.

    Raises:
        KeyError: if `old` is not present.
    """
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    """Return a new OrderedDict with backbone keys remapped to the HF layout.

    Keys containing 'backbone.0.body' are rewritten to
    'backbone.conv_encoder.model'; all other entries are copied unchanged,
    preserving order.
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace('''backbone.0.body''', '''backbone.conv_encoder.model''')
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    """Split fused attention projections into separate q/k/v entries, in place.

    PyTorch's MultiHeadAttention stores one `in_proj_weight`/`in_proj_bias`
    per layer; HF models expect separate q/k/v projections. Rows [:256] are
    the query, [256:512] the key, [-256:] the value (hidden size 256).
    """
    prefix = ''''''
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    """Resize a PIL image so its longer side matches the model's input size.

    Detection checkpoints use 800 px, structure-recognition checkpoints
    1000 px; aspect ratio is preserved.
    """
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if '''detection''' in checkpoint_url else 1_000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image
def normalize(image):
    """Convert a PIL image to a CHW float tensor normalized with ImageNet
    mean/std statistics."""
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """Convert an original Table Transformer checkpoint to the HF format.

    Downloads the checkpoint, remaps its state dict, verifies the converted
    model against hard-coded expected outputs on a sample image, and
    optionally saves and/or pushes the result.

    Args:
        checkpoint_url (str): URL of the original checkpoint.
        pytorch_dump_folder_path (str | None): where to save the converted model.
        push_to_hub (bool): whether to push model + processor to the HF hub.
    """
    logger.info('''Converting model...''')
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='''cpu''')
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = '''model.'''
    for key in state_dict.copy().keys():
        if not key.startswith('''class_labels_classifier''') and not key.startswith('''bbox_predictor'''):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val
    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone='''resnet18''', mask_loss_coefficient=1, dice_loss_coefficient=1, ce_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.4, class_cost=1, bbox_cost=5, giou_cost=2, )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: '''table''', 1: '''table rotated'''}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: '''table''',
            1: '''table column''',
            2: '''table row''',
            3: '''table column header''',
            4: '''table projected row header''',
            5: '''table spanning cell''',
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    image_processor = DetrImageProcessor(
        format='''coco_detection''', max_size=800 if '''detection''' in checkpoint_url else 1_000)
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion
    file_name = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png'''
    file_path = hf_hub_download(repo_id='''nielsr/example-pdf''', repo_type='''dataset''', filename=file_name)
    image = Image.open(file_path).convert('''RGB''')
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)
    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]])
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]])
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print('''Looks ok!''')
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model to HF hub
        logger.info('''Pushing model to the hub...''')
        model_name = (
            '''microsoft/table-transformer-detection'''
            if '''detection''' in checkpoint_url
            else '''microsoft/table-transformer-structure-recognition'''
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
# CLI entry point: parse arguments and run the conversion.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--checkpoint_url',
        default='https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
        type=str,
        choices=[
            'https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
            'https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth',
        ],
        help='URL of the Table Transformer checkpoint you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )
    args = parser.parse_args()
    convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 84 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    """Prepare a PIL image for the latent upscaler.

    Rounds the size down to a multiple of 32, converts to a float32 NCHW
    torch tensor, and rescales pixel values from [0, 255] to [-1, 1].
    """
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION['''lanczos'''])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class a_ ( a ):
def __init__(self, vqvae: VQModel, unet: UNetaDModel, scheduler: Union[
    DDIMScheduler,
    PNDMScheduler,
    LMSDiscreteScheduler,
    EulerDiscreteScheduler,
    EulerAncestralDiscreteScheduler,
    DPMSolverMultistepScheduler,
]):
    """Register the VQ-VAE, UNet and scheduler as pipeline modules.

    Args:
        vqvae: VQ model for encoding/decoding latents.
        unet: conditional UNet denoiser.
        scheduler: any of the supported discrete schedulers.
    """
    super().__init__()
    self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)
@torch.no_grad()
def __call__( self : Any , UpperCAmelCase__ : Union[torch.Tensor, PIL.Image.Image] = None , UpperCAmelCase__ : Optional[int] = 1 , UpperCAmelCase__ : Optional[int] = 100 , UpperCAmelCase__ : Optional[float] = 0.0 , UpperCAmelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , ):
"""simple docstring"""
if isinstance(UpperCAmelCase__ , PIL.Image.Image ):
snake_case : Optional[int] = 1
elif isinstance(UpperCAmelCase__ , torch.Tensor ):
snake_case : Any = image.shape[0]
else:
raise ValueError(F"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(UpperCAmelCase__ )}" )
if isinstance(UpperCAmelCase__ , PIL.Image.Image ):
snake_case : Optional[Any] = preprocess(UpperCAmelCase__ )
snake_case , snake_case : Union[str, Any] = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
snake_case : List[Any] = (batch_size, self.unet.config.in_channels // 2, height, width)
snake_case : str = next(self.unet.parameters() ).dtype
snake_case : Dict = randn_tensor(UpperCAmelCase__ , generator=UpperCAmelCase__ , device=self.device , dtype=UpperCAmelCase__ )
snake_case : Any = image.to(device=self.device , dtype=UpperCAmelCase__ )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(UpperCAmelCase__ , device=self.device )
snake_case : Optional[Any] = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
snake_case : Union[str, Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
snake_case : Any = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
snake_case : Optional[Any] = {}
if accepts_eta:
snake_case : Dict = eta
for t in self.progress_bar(UpperCAmelCase__ ):
# concat latents and low resolution image in the channel dimension.
snake_case : Optional[int] = torch.cat([latents, image] , dim=1 )
snake_case : str = self.scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ )
# predict the noise residual
snake_case : int = self.unet(UpperCAmelCase__ , UpperCAmelCase__ ).sample
# compute the previous noisy sample x_t -> x_t-1
snake_case : Any = self.scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
# decode the image latents with the VQVAE
snake_case : Optional[int] = self.vqvae.decode(UpperCAmelCase__ ).sample
snake_case : int = torch.clamp(UpperCAmelCase__ , -1.0 , 1.0 )
snake_case : Dict = image / 2 + 0.5
snake_case : int = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
snake_case : Any = self.numpy_to_pil(UpperCAmelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCAmelCase__ )
| 84 | 1 |
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a_(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for OpenAIGPTTokenizer / OpenAIGPTTokenizerFast.

    NOTE(review): attribute and method names restored to the identifiers
    ``TokenizerTesterMixin`` actually looks up — in the mangled original every
    class attribute was named ``A__`` and every method ``lowerCAmelCase``, so
    each definition silently shadowed the previous one.
    """

    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        # Input text and the text expected back after a tokenize/decode round trip.
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_padding(self, max_length=15):
        # Padding must fail loudly: this tokenizer defines no padding token.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )

    def test_padding_different_model_input_name(self):
        # Not applicable: the tokenizer has no padding token.
        pass
# Variant of the tokenization test that only runs when ftfy and spacy are
# installed (the slow tokenizer's optional text-cleaning backends).
# NOTE(review): this re-binds the module-level name ``a_`` and shadows the test
# class defined just above; the base ``a`` is not defined in this module either
# — both look like artifacts of automated renaming. Confirm intended names.
@require_ftfy
@require_spacy
@require_tokenizers
class a_ ( a ):
    pass
| 84 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class a_(TestCase):
    """Unit tests for RealmRetriever: tokenization of retrieved blocks and
    save/load of the block-records index.

    NOTE(review): names restored from the mangled original — ``self.tmpdirname``
    / ``self.num_block_records`` were never assigned (only locals named
    ``snake_case``), every method was named ``lowerCAmelCase`` (shadowing each
    other), and the base class ``a`` was undefined (``TestCase`` is imported
    above).
    """

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "test",
            "question",
            "this",
            "is",
            "the",
            "first",
            "second",
            "third",
            "fourth",
            "fifth",
            "record",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)

    def get_tokenizer(self):
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            }
        )
        return dataset

    def get_dummy_block_records(self):
        # dtype=object keeps the raw bytes objects instead of a fixed-width
        # numpy bytes dtype.
        block_records = np.array(
            [
                b"This is the first record",
                b"This is the second record",
                b"This is the third record",
                b"This is the fourth record",
                b"This is the fifth record",
                b"This is a longer longer longer record",
            ],
            dtype=object,
        )
        return block_records

    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever

    def test_tokenize(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"],
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"],
        )

    def test_full_tokenize(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)

    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")

        self.assertEqual(retriever.block_records[0], b"This is the first record")
| 84 | 1 |
from collections import namedtuple
# Each unit stores two factors relative to cubic meters:
#   value_in_m3 = value * from_      value_in_unit = value_in_m3 * to
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.0_01, 1_000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.0_04_54, 2_64.1_72),
    "cubicyard": from_to(0.7_64_55, 1.3_07_95),
    "cubicfoot": from_to(0.0_28, 35.31_47),
    "cup": from_to(0.0_00_23_65_88, 42_26.75),
}


def a_(value: float, from_type: str, to_type: str) -> float:
    """Convert a volume `value` from `from_type` to `to_type` via cubic meters.

    Raises ValueError (listing the supported units) for an unknown unit.
    NOTE(review): restored the names the function body reads (``from_to``,
    ``METRIC_CONVERSION`` — both previously assigned to ``_a``) and made the
    error messages join the supported unit names instead of the characters of
    the offending input string.
    """
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 84 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy-import table for the encoder-decoder model: each backend's classes are
# registered only when that backend (torch / tf / flax) is installed.
# NOTE(review): every assignment here originally went to ``_a`` while the final
# _LazyModule call (below) read ``_import_structure`` — restored the real name.
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime they stay lazy.
    from .configuration_encoder_decoder import EncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encoder_decoder import EncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_encoder_decoder import TFEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel

else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 84 | 1 |
def check_cycle(graph: dict) -> bool:
    """Return True if the directed `graph` (adjacency dict) contains a cycle.

    NOTE(review): both functions were named ``a_`` in the mangled original (the
    second with duplicate parameter names — a SyntaxError) while this body
    called ``depth_first_search``; canonical names restored.
    """
    visited: set[int] = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set[int] = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    """DFS from `vertex`; True if a back edge (cycle) is reachable."""
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False


if __name__ == "__main__":
    from doctest import testmod

    testmod()
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

# NOTE(review): constant names restored — the tokenizer class below reads
# VOCAB_FILES_NAMES / PRETRAINED_* but everything here was assigned to ``_a``.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "yjernite/retribert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class a_(PreTrainedTokenizerFast):
    r"""Fast (tokenizers-backed) RetriBert tokenizer, BERT-style.

    NOTE(review): method and attribute names restored — overrides must carry the
    base-class names (``build_inputs_with_special_tokens`` etc.); the mangled
    original named every attribute ``A__`` and every method ``lowerCAmelCase``.
    Base class restored to ``PreTrainedTokenizerFast`` (imported above).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if the serialized state disagrees with
        # the requested options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_a=None):
        """[CLS] A [SEP] (and B [SEP] for pairs)."""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]

        if token_ids_a_a:
            output += token_ids_a_a + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None):
        """0s for the first sequence (incl. specials), 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a_a + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """Persist the backend tokenizer model files; returns the file paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 84 | 1 |
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
_a : Dict = logging.get_logger(__name__)
def a_ ( __magic_name__ , __magic_name__ , __magic_name__ ) -> List[Any]:
"""simple docstring"""
return [
int(1_000 * (box[0] / width) ),
int(1_000 * (box[1] / height) ),
int(1_000 * (box[2] / width) ),
int(1_000 * (box[3] / height) ),
]
def apply_tesseract(image, lang, tesseract_config=None):
    """Run Tesseract OCR on `image`; return (words, boxes normalized to 0-1000).

    NOTE(review): renamed from the mangled ``a_`` — the image processor below
    calls ``apply_tesseract`` — and local names restored (``pil_image``,
    ``image_width`` etc. were read but never bound).
    """
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class a_(BaseImageProcessor):
    r"""Image processor for LayoutLM-style document models: resizes images,
    flips RGB->BGR (Detectron2 convention) and optionally runs Tesseract OCR to
    produce per-image words and normalized bounding boxes.

    NOTE(review): parameter/local names restored from the mangled original
    (duplicate ``UpperCAmelCase__`` parameters were a SyntaxError); base class
    restored to ``BaseImageProcessor`` (imported above).
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Resize `image` to `size` ({"height": h, "width": w})."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        # `resize` here resolves to the module-level transform imported above.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Preprocess one or more images; returns a BatchFeature with
        "pixel_values" (+ "words"/"boxes" when OCR is applied)."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
| 84 |
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    """Euclidean GCD.

    NOTE(review): restored the recursive name the body already used (the
    mangled ``a_`` had duplicate parameter names — a SyntaxError).
    """
    return b if a == 0 else greatest_common_divisor(b % a, a)


class HillCipher:
    """Hill cipher over the 36-character alphabet A-Z0-9 with an n x n key.

    NOTE(review): class renamed from ``a_`` — ``main`` below instantiates
    ``HillCipher``; the vectorized helpers were broken
    (``lambda a: x % 36`` and ``numpy.vectorize(a)`` instead of ``round``).
    """

    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray):
        """Store the key mod 36 and validate its determinant."""
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        """Character -> index in the 36-char alphabet."""
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        """Index (possibly float) -> character in the 36-char alphabet."""
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        """Raise ValueError unless det(key) is coprime with 36 (invertible mod 36)."""
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)

        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        """Keep only alphabet chars and pad with the last char to a multiple of break_key."""
        chars = [char for char in text.upper() if char in self.key_string]

        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)

        return "".join(chars)

    def encrypt(self, text: str) -> str:
        """Encrypt `text` block-by-block: c = K·v (mod 36)."""
        text = self.process_text(text.upper())
        encrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
                0
            ]
            encrypted_batch = "".join(
                self.replace_digits(num) for num in batch_encrypted
            )
            encrypted += encrypted_batch

        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        """Return the key inverse mod 36 (via det_inv * adj(K))."""
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break

        # det * inv(K) is the (integer) adjugate matrix of K.
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )

        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        """Decrypt `text` block-by-block: v = K⁻¹·c (mod 36)."""
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(
                self.replace_digits(num) for num in batch_decrypted
            )
            decrypted += decrypted_batch

        return decrypted


def main() -> None:
    """Interactive driver: read a key matrix, then encrypt or decrypt text.

    NOTE(review): renamed from ``a_`` — the __main__ guard calls ``main()``.
    """
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 84 | 1 |
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# NOTE(review): constant names restored — the tokenizer class below reads
# VOCAB_FILES_NAMES / PRETRAINED_* but everything here was assigned to ``_a``.
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}
def a_( __magic_name__ ) -> "collections.OrderedDict":
    """Load a one-token-per-line vocabulary file into a token -> index map.

    BUG FIX: the original enumerated the *filename string* instead of the
    lines read from the file, assigned indices to a throwaway local instead
    of the dictionary, and returned an undefined name ``vocab``.  It also
    declared a wrong ``-> int`` return annotation.

    Args:
        __magic_name__: path to the vocabulary file.

    Returns:
        ``collections.OrderedDict`` mapping each stripped token to its
        0-based line index, preserving file order.
    """
    vocab = collections.OrderedDict()
    with open(__magic_name__ , '''r''' , encoding='''utf-8''' ) as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens ):
        # Strip only the trailing newline so meaningful whitespace survives.
        vocab[token.rstrip('''\n''' )] = index
    return vocab
class a_ ( a ):
    # SentencePiece-backed tokenizer (XLMProphetNet-style) that remaps the
    # SentencePiece ids onto a fairseq-compatible vocabulary layout:
    # [PAD]/[CLS]/[SEP]/[UNK]/[MASK] plus ten [unusedN] slots come first,
    # followed by the SentencePiece pieces shifted by a fixed offset.
    #
    # NOTE(review): this file has gone through a mechanical identifier
    # rename — every local is `snake_case`, every parameter `UpperCAmelCase__`
    # (duplicated within signatures, which is a SyntaxError), all methods
    # share the name `lowerCAmelCase` (so earlier definitions are shadowed),
    # and the module constants the class references (VOCAB_FILES_NAMES,
    # SPIECE_UNDERLINE, ...) are all bound to `_a` above.  The comments
    # below describe the intended behavior; the code cannot run as written.
    A__ : Optional[int] = VOCAB_FILES_NAMES
    A__ : str = PRETRAINED_VOCAB_FILES_MAP
    A__ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    A__ : Dict = ['input_ids', 'attention_mask']

    def __init__( self : Tuple , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[int]="[SEP]" , UpperCAmelCase__ : Any="[SEP]" , UpperCAmelCase__ : Optional[Any]="[SEP]" , UpperCAmelCase__ : int="[UNK]" , UpperCAmelCase__ : List[str]="[PAD]" , UpperCAmelCase__ : Tuple="[CLS]" , UpperCAmelCase__ : List[str]="[MASK]" , UpperCAmelCase__ : Optional[Dict[str, Any]] = None , **UpperCAmelCase__ : int , ):
        """Load the SentencePiece model and build the fairseq-aligned vocab maps."""
        snake_case : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase__ , )
        # SentencePiece is an optional dependency; warn and re-raise if absent.
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                '''You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'''
                ''' pip install sentencepiece''' )
            raise
        snake_case : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(UpperCAmelCase__ ) )
        snake_case : Optional[Any] = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # put special tokens and [unused] tokens into the vocab
        snake_case : Union[str, Any] = {'''[PAD]''': 0, '''[CLS]''': 1, '''[SEP]''': 2, '''[UNK]''': 3, '''[MASK]''': 4}
        # Reserve ten [unused0]..[unused9] slots at ids 5..14.
        for i in range(10 ):
            snake_case : Dict = F"[unused{i}]"
            snake_case : List[str] = 5 + i
        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        snake_case : Optional[Any] = 12
        snake_case : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(UpperCAmelCase__ )

    def __getstate__( self : int ):
        """Drop the unpicklable SentencePiece processor from the pickled state."""
        snake_case : List[Any] = self.__dict__.copy()
        snake_case : Dict = None
        return state

    def __setstate__( self : Dict , UpperCAmelCase__ : Tuple ):
        """Restore pickled state and rebuild the SentencePiece processor."""
        snake_case : Tuple = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                '''You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'''
                ''' pip install sentencepiece''' )
            raise
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            snake_case : Optional[int] = {}
        snake_case : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def lowerCAmelCase( self : Dict , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : bool = False ):
        """Return a 0/1 mask marking the special ([SEP]) positions with 1.

        NOTE(review): the super() call below passes `token_ids_a=` twice —
        a duplicate keyword argument (SyntaxError) from the rename.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=UpperCAmelCase__ , token_ids_a=UpperCAmelCase__ , already_has_special_tokens=UpperCAmelCase__ )
        if token_ids_a is None:
            return ([0] * len(UpperCAmelCase__ )) + [1]
        return ([0] * len(UpperCAmelCase__ )) + [1] + ([0] * len(UpperCAmelCase__ )) + [1]

    def lowerCAmelCase( self : Optional[int] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ):
        """Return all-zero token type ids (ProphetNet does not use segment ids)."""
        snake_case : int = [self.sep_token_id]
        if token_ids_a is None:
            return len(token_ids_a + sep ) * [0]
        return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    @property
    def lowerCAmelCase( self : Any ):
        """Total vocab size: SentencePiece pieces plus the fairseq offset slots."""
        return len(self.sp_model ) + self.fairseq_offset

    def lowerCAmelCase( self : Tuple ):
        """Build the full token -> id mapping, including added tokens."""
        snake_case : Any = {self.convert_ids_to_tokens(UpperCAmelCase__ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def lowerCAmelCase( self : List[str] , UpperCAmelCase__ : str ):
        """Tokenize text into SentencePiece subword pieces.

        NOTE(review): `out_type` should presumably be `str`; the rename has
        made it the input text itself.
        """
        return self.sp_model.encode(UpperCAmelCase__ , out_type=UpperCAmelCase__ )

    def lowerCAmelCase( self : Optional[Any] , UpperCAmelCase__ : Optional[Any] ):
        """Map a token string to its id, honoring the fairseq special-token table."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        snake_case : str = self.sp_model.PieceToId(UpperCAmelCase__ )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def lowerCAmelCase( self : int , UpperCAmelCase__ : int ):
        """Map an id back to its token, honoring the fairseq special-token table."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )

    def lowerCAmelCase( self : Optional[Any] , UpperCAmelCase__ : Dict ):
        """Join subword pieces back into plain text.

        NOTE(review): the first argument of ``replace`` should be the
        SentencePiece underline marker ('▁'); here it is the token list
        itself (would raise TypeError) — looks mangled.
        """
        snake_case : Optional[Any] = ''''''.join(UpperCAmelCase__ ).replace(UpperCAmelCase__ , ''' ''' ).strip()
        return out_string

    def lowerCAmelCase( self : int , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ):
        """Copy (or serialize) the SentencePiece model file into the directory."""
        if not os.path.isdir(UpperCAmelCase__ ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
            return
        snake_case : Any = os.path.join(
            UpperCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , UpperCAmelCase__ )
        elif not os.path.isfile(self.vocab_file ):
            # No file on disk: dump the in-memory serialized model instead.
            with open(UpperCAmelCase__ , '''wb''' ) as fi:
                snake_case : Tuple = self.sp_model.serialized_model_proto()
                fi.write(UpperCAmelCase__ )
        return (out_vocab_file,)

    def lowerCAmelCase( self : int , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ):
        """Append [SEP] markers: `A [SEP]` or `A [SEP] B [SEP]`.

        NOTE(review): the pair branch reuses `token_ids_a` for both
        sequences; the second sequence's name was lost in the rename.
        """
        if token_ids_a is None:
            return token_ids_a + [self.sep_token_id]
        snake_case : Any = [self.sep_token_id]
        return token_ids_a + sep + token_ids_a + sep
| 84 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class a_ ( a ):
    # Agent tool wrapping the BLIP captioning checkpoint: takes a PIL image
    # as input and returns a one-sentence English caption.
    A__ : List[Any] = 'Salesforce/blip-image-captioning-base'  # default checkpoint
    A__ : Dict = (
        'This is a tool that generates a description of an image. It takes an input named `image` which should be the '
        'image to caption, and returns a text that contains the description in English.'
    )
    A__ : str = 'image_captioner'  # tool name exposed to the agent
    A__ : Dict = AutoModelForVisionaSeq  # auto-class used to load the checkpoint
    A__ : Optional[Any] = ['image']  # tool input names
    A__ : List[str] = ['text']  # tool output names

    def __init__( self : List[str] , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : Union[str, Any] ):
        """Require the vision extra before the base PipelineTool setup runs."""
        requires_backends(self , ['''vision'''] )
        super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )

    def lowerCAmelCase( self : Optional[int] , UpperCAmelCase__ : "Image" ):
        """Preprocess: convert the PIL image into PyTorch tensors."""
        # NOTE(review): the three pipeline-step methods all share the name
        # `lowerCAmelCase`, so the earlier ones are shadowed — the original
        # encode/forward/decode names appear mechanically mangled.
        return self.pre_processor(images=UpperCAmelCase__ , return_tensors='''pt''' )

    def lowerCAmelCase( self : Any , UpperCAmelCase__ : Union[str, Any] ):
        """Forward: autoregressively generate caption token ids."""
        return self.model.generate(**UpperCAmelCase__ )

    def lowerCAmelCase( self : Optional[Any] , UpperCAmelCase__ : List[Any] ):
        """Postprocess: decode the generated ids into a stripped caption string."""
        # NOTE(review): `skip_special_tokens` is passed the outputs argument
        # itself; the intended value (presumably True) was lost in the rename.
        return self.pre_processor.batch_decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ )[0].strip()
| 84 | 1 |
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class a_ :
def __init__( self : Tuple , UpperCAmelCase__ : Union[str, Any] ):
"""simple docstring"""
snake_case : List[Any] = data
snake_case : Tuple = [0X67_452_301, 0XEF_CDA_B89, 0X98_BAD_CFE, 0X10_325_476, 0XC3_D2E_1F0]
@staticmethod
def lowerCAmelCase( UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any ):
"""simple docstring"""
return ((n << b) | (n >> (32 - b))) & 0XFF_FFF_FFF
def lowerCAmelCase( self : List[str] ):
"""simple docstring"""
snake_case : str = b'''\x80''' + b'''\x00''' * (63 - (len(self.data ) + 8) % 64)
snake_case : Optional[Any] = self.data + padding + struct.pack('''>Q''' , 8 * len(self.data ) )
return padded_data
def lowerCAmelCase( self : List[Any] ):
"""simple docstring"""
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
]
def lowerCAmelCase( self : Dict , UpperCAmelCase__ : Any ):
"""simple docstring"""
snake_case : List[str] = list(struct.unpack('''>16L''' , UpperCAmelCase__ ) ) + [0] * 64
for i in range(16 , 80 ):
snake_case : Dict = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
return w
def lowerCAmelCase( self : str ):
"""simple docstring"""
snake_case : int = self.padding()
snake_case : int = self.split_blocks()
for block in self.blocks:
snake_case : List[Any] = self.expand_block(UpperCAmelCase__ )
snake_case , snake_case , snake_case , snake_case , snake_case : Tuple = self.h
for i in range(0 , 80 ):
if 0 <= i < 20:
snake_case : Union[str, Any] = (b & c) | ((~b) & d)
snake_case : int = 0X5A_827_999
elif 20 <= i < 40:
snake_case : Tuple = b ^ c ^ d
snake_case : List[Any] = 0X6E_D9E_BA1
elif 40 <= i < 60:
snake_case : int = (b & c) | (b & d) | (c & d)
snake_case : List[str] = 0X8F_1BB_CDC
elif 60 <= i < 80:
snake_case : int = b ^ c ^ d
snake_case : Tuple = 0XCA_62C_1D6
snake_case , snake_case , snake_case , snake_case , snake_case : Optional[int] = (
self.rotate(UpperCAmelCase__ , 5 ) + f + e + k + expanded_block[i] & 0XFF_FFF_FFF,
a,
self.rotate(UpperCAmelCase__ , 30 ),
c,
d,
)
snake_case : Optional[Any] = (
self.h[0] + a & 0XFF_FFF_FFF,
self.h[1] + b & 0XFF_FFF_FFF,
self.h[2] + c & 0XFF_FFF_FFF,
self.h[3] + d & 0XFF_FFF_FFF,
self.h[4] + e & 0XFF_FFF_FFF,
)
return ("{:08x}" * 5).format(*self.h )
def a_( ) -> int:
    """Self-test: compare this implementation against hashlib on one input.

    NOTE(review): ``SHAaHash`` and ``hashlib.shaa`` do not exist as written
    (the implementation class above is named ``a_`` and the stdlib function
    is ``hashlib.sha1``), and ``__magic_name__`` is unbound here — the names
    look mechanically mangled, so this raises NameError as-is.
    """
    snake_case : Tuple = b'''Test String'''
    assert SHAaHash(__magic_name__ ).final_hash() == hashlib.shaa(__magic_name__ ).hexdigest()  # noqa: S324
def a_( ) -> Optional[Any]:
    """CLI entry point: hash ``--string`` (default) or the bytes of ``--file``
    and print the hex digest.

    NOTE(review): the parser/args/input locals are all assigned to
    ``snake_case`` and then read through their original names
    (``parser``, ``args``, ``__magic_name__``), and ``SHAaHash`` is
    undefined as written — artifacts of a mechanical rename.
    """
    snake_case : Optional[Any] = argparse.ArgumentParser(description='''Process some strings or files''' )
    snake_case : Tuple = parser.parse_args()
    snake_case : Tuple = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , '''rb''' ) as f:
            snake_case : Tuple = f.read()
    else:
        snake_case : Union[str, Any] = bytes(__magic_name__ , '''utf-8''' )
    print(SHAaHash(__magic_name__ ).final_hash() )
if __name__ == "__main__":
    # BUG FIX: this module defines no `main`; the CLI entry point above is
    # (after the mechanical rename) called `a_`, so main() raised NameError.
    a_()
    import doctest
    doctest.testmod()
| 84 |
def a_( __magic_name__ ) -> bool:
    """Lucas–Lehmer primality test for the Mersenne number ``2**p - 1``.

    BUG FIX: the body referenced an undefined name ``p`` while the parameter
    had been renamed to ``__magic_name__`` — every call raised NameError.

    Args:
        __magic_name__: the exponent ``p`` (must be >= 2).

    Returns:
        True iff ``2**p - 1`` is prime (only guaranteed meaningful for
        prime ``p``; the classical test defines p == 2 as True).

    Raises:
        ValueError: if ``p`` is less than 2.
    """
    p = __magic_name__  # restore the name the algorithm is written in
    if p < 2:
        raise ValueError('''p should not be less than 2!''' )
    elif p == 2:
        return True
    # Iterate s_{k+1} = s_k^2 - 2 (mod 2^p - 1), starting from s_0 = 4;
    # 2^p - 1 is prime iff s_{p-2} == 0.
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2 ):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
    # BUG FIX: the test function above is named `a_` after the mechanical
    # rename; `lucas_lehmer_test` does not exist and raised NameError.
    print(a_(7))
    print(a_(11))
| 84 | 1 |
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def a_(tok , src_examples , tgt_examples , max_tokens=1_024 ):
    """Greedily pack consecutive (source, target) example pairs into longer
    examples whose tokenized length stays under ``max_tokens``.

    BUG FIX: the original signature declared four parameters all named
    ``__magic_name__`` — a SyntaxError — while the body read the real names
    (``tok``, ``max_tokens``); the parameter list is reconstructed from
    those uses.  The bogus ``-> Optional[int]`` annotation (an undefined
    name in this module) is dropped.

    Args:
        tok: tokenizer callable supporting ``tok(text, return_tensors='pt')``.
        src_examples: source strings, aligned with ``tgt_examples``.
        tgt_examples: target strings.
        max_tokens: per-example token budget.

    Returns:
        Tuple ``(packed_sources, packed_targets)`` of equal length.
    """
    finished_src , finished_tgt = [], []
    sorted_examples = list(zip(src_examples , tgt_examples ) )
    new_src , new_tgt = sorted_examples[0]

    def is_too_big(text ):
        # True when the tokenized text exceeds the packing budget.
        return tok(text , return_tensors='''pt''' ).input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:] ):
        cand_src = new_src + ''' ''' + src
        cand_tgt = new_tgt + ''' ''' + tgt
        if is_too_big(cand_src ) or is_too_big(cand_tgt ):  # cant fit, finalize example
            finished_src.append(new_src )
            finished_tgt.append(new_tgt )
            new_src , new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src , new_tgt = cand_src, cand_tgt
    # cleanup: flush the final in-progress pair
    if new_src:
        assert new_tgt
        finished_src.append(new_src )
        finished_tgt.append(new_tgt )
    return finished_src, finished_tgt
def a_(tok , data_dir , max_tokens , save_path ):
    """Pack the train split of a seq2seq data directory and copy the val/test
    splits through unchanged.

    BUG FIX: four parameters were all named ``__magic_name__`` (a
    SyntaxError); names are reconstructed from the body's uses.  The mangled
    ``mkdir(exist_ok=__magic_name__)`` is restored to ``exist_ok=True``.
    NOTE(review): ``pack_examples`` is the packer defined above — its
    definition was renamed to ``a_`` by the same mangling, so confirm the
    name before running.

    Args:
        tok: tokenizer passed through to the packer.
        data_dir: ``Path`` holding ``{split}.source`` / ``{split}.target``.
        max_tokens: per-example token budget for packing.
        save_path: output directory (created if missing).
    """
    save_dir = Path(save_path )
    save_dir.mkdir(exist_ok=True )
    for split in ["train"]:
        src_path , tgt_path = data_dir / F"{split}.source", data_dir / F"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path ).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path ).open().readlines()]
        packed_src , packed_tgt = pack_examples(tok , src_docs , tgt_docs , max_tokens )
        print(F"packed {split} split from {len(src_docs )} examples -> {len(packed_src )}." )
        Path(save_dir / F"{split}.source" ).open('''w''' ).write('''\n'''.join(packed_src ) )
        Path(save_dir / F"{split}.target" ).open('''w''' ).write('''\n'''.join(packed_tgt ) )
    for split in ["val", "test"]:
        src_path , tgt_path = data_dir / F"{split}.source", data_dir / F"{split}.target"
        shutil.copyfile(src_path , save_dir / F"{split}.source" )
        shutil.copyfile(tgt_path , save_dir / F"{split}.target" )
def a_( ) -> Optional[Any]:
    """CLI wrapper: parse --tok_name/--max_seq_len/--data_dir/--save_path,
    load the tokenizer and run the dataset packer.

    NOTE(review): ``parser``/``args`` are read but every assignment targets
    ``snake_case``, and ``pack_data_dir`` does not exist in this module (the
    packer above was renamed to ``a_``, itself shadowed by this definition)
    — artifacts of a mechanical rename; this cannot run as written.
    """
    snake_case : Tuple = argparse.ArgumentParser()
    parser.add_argument('''--tok_name''' , type=__magic_name__ , help='''like facebook/bart-large-cnn,t5-base, etc.''' )
    parser.add_argument('''--max_seq_len''' , type=__magic_name__ , default=128 )
    parser.add_argument('''--data_dir''' , type=__magic_name__ )
    parser.add_argument('''--save_path''' , type=__magic_name__ )
    snake_case : Optional[int] = parser.parse_args()
    snake_case : Optional[int] = AutoTokenizer.from_pretrained(args.tok_name )
    return pack_data_dir(__magic_name__ , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
    # BUG FIX: `packer_cli` does not exist in this module; the CLI function
    # above is (after the mechanical rename) called `a_`.
    a_()
| 84 |
from sklearn.metrics import fa_score
import datasets
_a : List[str] = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'
_a : Dict = '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. 
This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, 
references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n'
_a : List[Any] = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
    # `datasets` metric wrapper around scikit-learn's F1 score.
    # NOTE(review): `fa_score` (imported at the top of the file) corresponds
    # to scikit-learn's `f1_score`, and `_DESCRIPTION`/`_CITATION`/
    # `_KWARGS_DESCRIPTION` correspond to the module string constants that a
    # mechanical rename has bound to `_a` — as written these names are
    # unresolvable.
    def lowerCAmelCase( self : Any ):
        """Declare the metric schema: int sequences for the multilabel config,
        scalar ints otherwise."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
                    '''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
                }
                if self.config_name == '''multilabel'''
                else {
                    '''predictions''': datasets.Value('''int32''' ),
                    '''references''': datasets.Value('''int32''' ),
                } ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )

    def lowerCAmelCase( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Optional[Any]=1 , UpperCAmelCase__ : List[str]="binary" , UpperCAmelCase__ : str=None ):
        """Compute F1: a float when `average` collapses classes, else the
        per-class score array."""
        snake_case : List[Any] = fa_score(
            UpperCAmelCase__ , UpperCAmelCase__ , labels=UpperCAmelCase__ , pos_label=UpperCAmelCase__ , average=UpperCAmelCase__ , sample_weight=UpperCAmelCase__ )
        return {"f1": float(UpperCAmelCase__ ) if score.size == 1 else score}
| 84 | 1 |
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def a_(gpta_checkpoint_path , gpta_config_file , pytorch_dump_folder_path ):
    """Convert a TensorFlow GPT-2 checkpoint into PyTorch weight + config files.

    BUG FIX: the three parameters were all named ``__magic_name__`` (a
    SyntaxError); the names are reconstructed from the body's references
    and the CLI call at the bottom of the file.

    Args:
        gpta_checkpoint_path: path to the TF checkpoint.
        gpta_config_file: optional JSON config path ('' -> default config).
        pytorch_dump_folder_path: output directory for the PyTorch files.
    """
    # Construct the model config: default GPT-2 unless a JSON config is given.
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file )
    config_model = GPTaModel(config )
    # Load weights from numpy
    load_tf_weights_in_gpta(config_model , config , gpta_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
    print(F"Save PyTorch model to {pytorch_weights_dump_path}" )
    torch.save(config_model.state_dict() , pytorch_weights_dump_path )
    print(F"Save configuration file to {pytorch_config_dump_path}" )
    with open(pytorch_config_dump_path , '''w''' , encoding='''utf-8''' ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    # BUG FIX: the parser was assigned to `_a` but used as `parser`, the
    # namespace was read through non-existent `args.gpta_*` attribute names
    # (argparse stores them under the `--gpt2_*` option names), and the
    # called function `convert_gpta_checkpoint_to_pytorch` does not exist —
    # the converter above is named `a_`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    parser.add_argument(
        '--gpt2_config_file',
        default='',
        type=str,
        help=(
            'An optional config json file corresponding to the pre-trained OpenAI model. \n'
            'This specifies the model architecture.'
        ),
    )
    args = parser.parse_args()
    a_(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
| 84 |
def a_( __magic_name__ ) -> int:
    """Return the largest number obtainable by deleting exactly one digit.

    BUG FIX: the original checked ``isinstance(x, x)`` (TypeError for every
    input), built the candidate lists from the integer argument instead of
    its digit string, popped with the wrong argument, and joined the wrong
    variable — all artifacts of a mechanical rename.

    Args:
        __magic_name__: the input integer (its absolute value is used).

    Returns:
        The maximum integer formed by removing one digit of ``abs(n)``.

    Raises:
        TypeError: if the input is not an integer.
        ValueError: for single-digit inputs (removing the only digit leaves
            an empty string), preserving the original behavior.
    """
    if not isinstance(__magic_name__ , int ):
        raise TypeError('''only integers accepted as input''' )
    num_str = str(abs(__magic_name__ ) )
    # One copy of the digit list per position, then drop position i from copy i.
    candidates = [list(num_str ) for _ in range(len(num_str ) )]
    for index in range(len(num_str ) ):
        candidates[index].pop(index )
    return max(int(''''''.join(candidate ) ) for candidate in candidates )


if __name__ == "__main__":
    __import__('doctest').testmod()
| 84 | 1 |
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class a_ ( a ):
    # Value-guided RL planning pipeline: denoises a batch of trajectory
    # candidates with a diffusion UNet while nudging each step along the
    # gradient of a learned value function, then returns the first action of
    # the highest-value trajectory.
    #
    # NOTE(review): identifiers in this file look mechanically renamed —
    # every local is assigned to `snake_case` while later lines read the
    # original names (`self.means`, `x`, `y`, `grad`, ...), so the code
    # cannot run as written.  Comments describe the intended behavior.
    def __init__( self : Any , UpperCAmelCase__ : UNetaDModel , UpperCAmelCase__ : UNetaDModel , UpperCAmelCase__ : DDPMScheduler , UpperCAmelCase__ : int , ):
        """Cache the models, the scheduler, the environment and per-key
        dataset statistics used for (de)normalization."""
        super().__init__()
        snake_case : Tuple = value_function
        snake_case : Union[str, Any] = unet
        snake_case : Dict = scheduler
        snake_case : Union[str, Any] = env
        snake_case : Union[str, Any] = env.get_dataset()
        # Per-key mean of every numeric dataset field (best effort: fields
        # without .mean() are silently skipped).
        snake_case : str = {}
        for key in self.data.keys():
            try:
                snake_case : List[str] = self.data[key].mean()
            except: # noqa: E722
                pass
        # Per-key standard deviation (same best-effort skip).
        snake_case : int = {}
        for key in self.data.keys():
            try:
                snake_case : Dict = self.data[key].std()
            except: # noqa: E722
                pass
        snake_case : Dict = env.observation_space.shape[0]
        snake_case : List[str] = env.action_space.shape[0]

    def lowerCAmelCase( self : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str ):
        """Normalize `x_in` with the dataset statistics stored under `key`."""
        return (x_in - self.means[key]) / self.stds[key]

    def lowerCAmelCase( self : int , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str ):
        """Invert the normalization for `key`."""
        return x_in * self.stds[key] + self.means[key]

    def lowerCAmelCase( self : str , UpperCAmelCase__ : List[str] ):
        """Recursively move dicts/arrays/tensors onto the UNet's device."""
        if type(UpperCAmelCase__ ) is dict:
            return {k: self.to_torch(UpperCAmelCase__ ) for k, v in x_in.items()}
        elif torch.is_tensor(UpperCAmelCase__ ):
            return x_in.to(self.unet.device )
        return torch.tensor(UpperCAmelCase__ , device=self.unet.device )

    def lowerCAmelCase( self : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any] ):
        """Overwrite the conditioned timesteps of the trajectory tensor with
        the fixed values from `cond` (e.g. pin the initial state)."""
        for key, val in cond.items():
            snake_case : Optional[int] = val.clone()
        return x_in

    def lowerCAmelCase( self : Tuple , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int ):
        """Guided denoising loop: at each scheduler timestep, ascend the value
        function's gradient, re-apply conditions, and step the scheduler."""
        snake_case : List[str] = x.shape[0]
        snake_case : Optional[Any] = None
        for i in tqdm.tqdm(self.scheduler.timesteps ):
            # create batch of timesteps to pass into model
            snake_case : List[Any] = torch.full((batch_size,) , UpperCAmelCase__ , device=self.unet.device , dtype=torch.long )
            for _ in range(UpperCAmelCase__ ):
                with torch.enable_grad():
                    x.requires_grad_()
                    # permute to match dimension for pre-trained models
                    snake_case : str = self.value_function(x.permute(0 , 2 , 1 ) , UpperCAmelCase__ ).sample
                    snake_case : List[Any] = torch.autograd.grad([y.sum()] , [x] )[0]
                    snake_case : str = self.scheduler._get_variance(UpperCAmelCase__ )
                    snake_case : Dict = torch.exp(0.5 * posterior_variance )
                    # Scale the gradient by the posterior standard deviation.
                    snake_case : Dict = model_std * grad
                # NOTE(review): presumably this zeroed the guidance gradient
                # for the final timesteps in the original code — mangled here.
                snake_case : List[str] = 0
                snake_case : Optional[Any] = x.detach()
                snake_case : Optional[int] = x + scale * grad
                snake_case : int = self.reset_xa(UpperCAmelCase__ , UpperCAmelCase__ , self.action_dim )
            snake_case : Tuple = self.unet(x.permute(0 , 2 , 1 ) , UpperCAmelCase__ ).sample.permute(0 , 2 , 1 )
            # TODO: verify deprecation of this kwarg
            snake_case : Dict = self.scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , predict_epsilon=UpperCAmelCase__ )['''prev_sample''']
            # apply conditions to the trajectory (set the initial state)
            snake_case : Dict = self.reset_xa(UpperCAmelCase__ , UpperCAmelCase__ , self.action_dim )
            snake_case : Union[str, Any] = self.to_torch(UpperCAmelCase__ )
        return x, y

    def __call__( self : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict=64 , UpperCAmelCase__ : str=32 , UpperCAmelCase__ : Optional[Any]=2 , UpperCAmelCase__ : Dict=0.1 ):
        """Plan from an observation: normalize it, tile to a batch of noisy
        trajectory candidates, run guided denoising, and return the
        de-normalized first action of the best-scoring trajectory."""
        # normalize the observations and create  batch dimension
        snake_case : List[str] = self.normalize(UpperCAmelCase__ , '''observations''' )
        snake_case : str = obs[None].repeat(UpperCAmelCase__ , axis=0 )
        snake_case : List[str] = {0: self.to_torch(UpperCAmelCase__ )}
        snake_case : Dict = (batch_size, planning_horizon, self.state_dim + self.action_dim)
        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        snake_case : Dict = randn_tensor(UpperCAmelCase__ , device=self.unet.device )
        snake_case : Union[str, Any] = self.reset_xa(UpperCAmelCase__ , UpperCAmelCase__ , self.action_dim )
        snake_case : Tuple = self.to_torch(UpperCAmelCase__ )
        # run the diffusion process
        snake_case , snake_case : str = self.run_diffusion(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
        # sort output trajectories by value
        snake_case : Dict = y.argsort(0 , descending=UpperCAmelCase__ ).squeeze()
        snake_case : Optional[Any] = x[sorted_idx]
        snake_case : str = sorted_values[:, :, : self.action_dim]
        snake_case : Optional[Any] = actions.detach().cpu().numpy()
        snake_case : Union[str, Any] = self.de_normalize(UpperCAmelCase__ , key='''actions''' )
        # select the action with the highest value
        if y is not None:
            snake_case : Tuple = 0
        else:
            # if we didn't run value guiding, select a random action
            snake_case : Tuple = np.random.randint(0 , UpperCAmelCase__ )
        snake_case : Dict = denorm_actions[selected_index, 0]
        return denorm_actions
| 84 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class a_ :
    def __init__( self : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any]=99 , UpperCAmelCase__ : Dict=13 , UpperCAmelCase__ : int=7 , UpperCAmelCase__ : Any=9 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Tuple=False , UpperCAmelCase__ : Tuple=32 , UpperCAmelCase__ : Dict=5 , UpperCAmelCase__ : Optional[int]=4 , UpperCAmelCase__ : List[Any]=37 , UpperCAmelCase__ : Union[str, Any]=8 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : str=0.002 , UpperCAmelCase__ : str=1 , UpperCAmelCase__ : Any=0 , UpperCAmelCase__ : Union[str, Any]=0 , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Optional[Any]=None , ):
        """Store the model-tester hyperparameters.

        NOTE(review): the signature repeats the parameter name
        `UpperCAmelCase__` (a SyntaxError) and the body reads names such as
        `parent`/`batch_size` that are never bound — a mechanical rename has
        destroyed the original parameter list.
        """
        snake_case : Union[str, Any] = parent
        snake_case : Union[str, Any] = batch_size
        snake_case : Any = encoder_seq_length
        snake_case : str = decoder_seq_length
        # For common tests
        snake_case : Optional[int] = self.decoder_seq_length
        snake_case : Optional[Any] = is_training
        snake_case : List[Any] = use_attention_mask
        snake_case : Union[str, Any] = use_labels
        snake_case : Any = vocab_size
        snake_case : Optional[int] = hidden_size
        snake_case : List[str] = num_hidden_layers
        snake_case : Union[str, Any] = num_attention_heads
        snake_case : Any = d_ff
        snake_case : Any = relative_attention_num_buckets
        snake_case : Optional[Any] = dropout_rate
        snake_case : int = initializer_factor
        snake_case : Optional[Any] = eos_token_id
        snake_case : Dict = pad_token_id
        snake_case : Optional[Any] = decoder_start_token_id
        # presumably the original `self.scope = None` — TODO confirm
        snake_case : Union[str, Any] = None
        snake_case : List[str] = decoder_layers
    def lowerCAmelCase( self : Union[str, Any] ):
        """Return the real UMT5-base config from the hub (large-model tests)."""
        return TaConfig.from_pretrained('''google/umt5-base''' )
    def lowerCAmelCase( self : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : List[Any]=None , ):
        """Assemble the model-input dict, defaulting attention masks to
        "not padding" and head masks to all-ones.

        NOTE(review): the duplicated `UpperCAmelCase__` parameter names are a
        SyntaxError, and the body reads `attention_mask`/`config`/... that
        the mangled signature never binds.
        """
        if attention_mask is None:
            snake_case : Union[str, Any] = input_ids.ne(config.pad_token_id )
        if decoder_attention_mask is None:
            snake_case : Any = decoder_input_ids.ne(config.pad_token_id )
        if head_mask is None:
            snake_case : List[Any] = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=UpperCAmelCase__ )
        if decoder_head_mask is None:
            snake_case : Tuple = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=UpperCAmelCase__ )
        if cross_attn_head_mask is None:
            snake_case : Union[str, Any] = torch.ones(
                config.num_decoder_layers , config.num_attention_heads , device=UpperCAmelCase__ )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }
def lowerCAmelCase( self : int ):
"""simple docstring"""
snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
snake_case : List[str] = input_ids.clamp(self.pad_token_id + 1 )
snake_case : List[str] = decoder_input_ids.clamp(self.pad_token_id + 1 )
snake_case : str = self.get_config()
snake_case : Tuple = config.num_attention_heads
snake_case : List[Any] = self.prepare_inputs_dict(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
return config, input_dict
def lowerCAmelCase( self : Dict ):
"""simple docstring"""
snake_case , snake_case : List[str] = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCAmelCase( self : Dict ):
"""simple docstring"""
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def lowerCAmelCase( self : Tuple ):
"""simple docstring"""
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def lowerCAmelCase( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Tuple , ):
"""simple docstring"""
snake_case : str = UMTaModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
snake_case : str = model(
input_ids=UpperCAmelCase__ , decoder_input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , )
snake_case : int = model(input_ids=UpperCAmelCase__ , decoder_input_ids=UpperCAmelCase__ )
snake_case : int = result.last_hidden_state
snake_case : Dict = result.past_key_values
snake_case : Dict = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(UpperCAmelCase__ ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def lowerCAmelCase( self : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str] , ):
"""simple docstring"""
snake_case : int = UMTaModel(config=UpperCAmelCase__ ).get_decoder().to(UpperCAmelCase__ ).eval()
# first forward pass
snake_case : List[Any] = model(UpperCAmelCase__ , use_cache=UpperCAmelCase__ )
snake_case : List[Any] = model(UpperCAmelCase__ )
snake_case : Any = model(UpperCAmelCase__ , use_cache=UpperCAmelCase__ )
self.parent.assertTrue(len(UpperCAmelCase__ ) == len(UpperCAmelCase__ ) )
self.parent.assertTrue(len(UpperCAmelCase__ ) == len(UpperCAmelCase__ ) + 1 )
snake_case , snake_case : List[str] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
snake_case : Any = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
snake_case : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case : Any = model(UpperCAmelCase__ )['''last_hidden_state''']
snake_case : Optional[Any] = model(UpperCAmelCase__ , past_key_values=UpperCAmelCase__ )['''last_hidden_state''']
# select random slice
snake_case : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case : Union[str, Any] = output_from_no_past[:, -1, random_slice_idx].detach()
snake_case : Tuple = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-3 ) )
def lowerCAmelCase( self : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , ):
"""simple docstring"""
snake_case : int = UMTaModel(config=UpperCAmelCase__ ).to(UpperCAmelCase__ ).half().eval()
snake_case : str = model(**UpperCAmelCase__ )['''last_hidden_state''']
self.parent.assertFalse(torch.isnan(UpperCAmelCase__ ).any().item() )
@require_torch
class a_ ( a , a , a , unittest.TestCase ):
    """Model-level tests for the UMTa (UMT5) family.

    NOTE(review): the base-class list repeats the single mangled name ``a``
    three times, which raises ``TypeError: duplicate base class`` at class
    creation — upstream these are three distinct tester mixins
    (ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin); the real
    imported names are not visible from here, so the list is left unchanged.
    Class attributes and method names below are restored from the upstream
    test file: the mangled versions repeatedly rebound one name (``A__`` /
    ``lowerCAmelCase``), so all but the last binding were silently lost and
    no method was discoverable by unittest.
    """

    all_model_classes = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'conversational': UMTaForConditionalGeneration,
            'feature-extraction': UMTaModel,
            'summarization': UMTaForConditionalGeneration,
            'text2text-generation': UMTaForConditionalGeneration,
            'translation': UMTaForConditionalGeneration,
            'question-answering': UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        # Shared tester instance used by every test below.
        self.model_tester = UMTaModelTester(self)

    @unittest.skip('''Test has a segmentation fault on torch 1.8.0''')
    def test_export_to_onnx(self):
        """Export the model to ONNX (skipped on torch 1.8.0 due to a segfault)."""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMTaModel(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model,
                # (input_ids, attention_mask, decoder_input_ids) per the
                # input_names declared below — indices follow the dict order
                # produced by prepare_config_and_inputs.
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                f"{tmpdirname}/t5_test.onnx",
                export_params=True,
                opset_version=9,
                input_names=['''input_ids''', '''decoder_input_ids'''],
            )

    @unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''')
    def test_model_fpaa_forward(self):
        """Half-precision forward smoke test (delegates to the tester)."""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fpaa_forward(*config_and_inputs)

    def test_generate_with_head_masking(self):
        """Zeroing a head mask must zero the corresponding attention weights."""
        attention_names = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions''']
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMTaForConditionalGeneration(config).eval()
        model.to(torch_device)
        head_masking = {
            '''head_mask''': torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            '''decoder_head_mask''': torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            '''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }
        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )
            out = model.generate(
                config_and_inputs[1]['''input_ids'''],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)

    @unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''')
    def test_disk_offload(self):
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class a_ ( unittest.TestCase ):
    # NOTE(review): this class shares the mangled name `a_` with the model test
    # class defined earlier in the module, so it shadows that class at module
    # scope — upstream these were two distinctly named test classes; confirm
    # and restore the original names.
    @slow
    @unittest.skip(
        '''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' )
    def lowerCAmelCase( self : int ):
        """Integration check for google/umt5-small: tokenize sentinel-token
        prompts, compare the ids against a frozen tensor, then generate and
        compare the decoded fillings against frozen strings verbatim.

        NOTE(review): local bindings were mangled to `snake_case` and keyword
        values to `UpperCAmelCase__`, so as written the body would raise
        NameError if the skip were lifted; kept byte-identical here.
        """
        # Load the small UMT5 checkpoint and its tokenizer (presumably the slow,
        # non-legacy tokenizer — the mangled kwargs hide the actual flags).
        snake_case : Optional[Any] = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=UpperCAmelCase__ ).to(UpperCAmelCase__ )
        snake_case : int = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=UpperCAmelCase__ , legacy=UpperCAmelCase__ )
        # Multilingual prompts exercising <extra_id_N> sentinel filling.
        snake_case : List[str] = [
            '''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
            '''No se como puedo <extra_id_0>.''',
            '''This is the reason why we <extra_id_0> them.''',
            '''The <extra_id_0> walks in <extra_id_1>, seats''',
            '''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
        ]
        snake_case : Dict = tokenizer(UpperCAmelCase__ , return_tensors='''pt''' , padding=UpperCAmelCase__ ).input_ids
        # fmt: off
        # Frozen token ids the tokenizer must produce for the prompts above.
        snake_case : Optional[Any] = torch.tensor(
            [
            [ 38_530, 210_703, 256_299, 1_410, 256_298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
            [ 826, 321, 671, 25_922, 256_299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
            [ 1_460, 339, 312, 19_014, 10_620, 758, 256_299, 2_355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
            [ 517, 256_299, 14_869, 281, 301, 256_298, 275, 119_983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
            [ 320, 256_299, 14_869, 281, 2_234, 289, 2_275, 333,61_391, 289, 256_298, 543, 256_297, 168_714, 329, 256_296,274, 1],
            ] )
        # fmt: on
        torch.testing.assert_allclose(UpperCAmelCase__ , UpperCAmelCase__ )
        # Generate fillings for the sentinel tokens and compare the decoded
        # text against the frozen expectations below.
        snake_case : List[Any] = model.generate(input_ids.to(UpperCAmelCase__ ) )
        snake_case : int = [
            '''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
            '''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
            '''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
            '''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
            '''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
        ]
        snake_case : Tuple = tokenizer.batch_decode(UpperCAmelCase__ )
        self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
# NOTE(review): trailing non-Python residue (a markdown-table fragment and
# website boilerplate appended by the scrape) — commented out so the module
# remains parseable; content preserved below.
# | 84 | 1 |
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.