code stringlengths 81 54k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
_a : str = TypeVar('T')
class a_ ( Generic[T] ):
    """Singly-linked node holding one datum of the linked-list stack below."""

    def __init__(self, data: T) -> None:
        # Bug fix: the scrambled original assigned to throwaway locals, so the
        # node never had attributes — `__str__` reads `self.data` and the
        # stack walks `.next`, so both must be instance attributes.
        self.data = data  # payload carried by this node
        self.next: Node[T] | None = None  # link to the node beneath this one

    def __str__(self) -> str:
        return f"{self.data}"
class a_ ( Generic[T] ):
    """LIFO stack implemented on top of a singly linked list.

    Fixes to the scrambled original: every method was named `lowerCAmelCase`
    (so only the last definition survived, while the bodies already call
    `self.is_empty()`), `__str__` stringified an undefined name instead of the
    iterated item, and push/pop dropped their `self.top`/`node.next` writes.

    NOTE(review): `Node` is the node class defined just above, whose own name
    was scrambled to `a_` — confirm the binding when unscrambling.
    """

    def __init__(self) -> None:
        self.top: Node[T] | None = None  # head of the list == top of the stack

    def __iter__(self) -> Iterator[T]:
        """Yield the stored items from top to bottom."""
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        # str(item) — the scrambled source joined an undefined name here.
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        """Place *item* on top of the stack."""
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        """Remove and return the top item; raise IndexError when empty."""
        if self.is_empty():
            raise IndexError('pop from empty stack')
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        """Return (without removing) the top item; raise IndexError when empty."""
        if self.is_empty():
            raise IndexError('peek from empty stack')
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        """Drop every item from the stack."""
        self.top = None
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    from doctest import testmod
    testmod()
| 711 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_a : Optional[Any] = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
_a : str = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
_a : List[Any] = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. 
Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
    """Metric wrapper around the official `mauve-text` implementation."""

    def _info(self):
        # `datasets.Metric` dispatches to a method literally named `_info`;
        # the scrambled source named every method `lowerCAmelCase`, so later
        # definitions shadowed earlier ones and the hook was unreachable.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage='https://github.com/krishnap25/mauve',
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Value('string', id='sequence'),
                }
            ),
            codebase_urls=['https://github.com/krishnap25/mauve'],
            reference_urls=[
                'https://arxiv.org/abs/2102.01454',
                'https://github.com/krishnap25/mauve',
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets='auto',
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name='gpt2-large',
        device_id=-1,
        max_text_length=1_024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        """Compute MAUVE between generated `predictions` and `references`.

        Fixes: the scrambled signature repeated one parameter name (a
        SyntaxError) and the result of `compute_mauve` was assigned to a
        throwaway local while `return out` referenced an unbound name.
        Parameter names/defaults are restored from the metric's own
        `_KWARGS_DESCRIPTION` docstring.
        """
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
| 84 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# Scrambled module constants: `_a` here stands in for the conventional
# `logger`, `VOCAB_FILES_NAMES` and `PRETRAINED_VOCAB_FILES_MAP` — the
# tokenizer class below references those conventional names.
_a : str = logging.get_logger(__name__)
_a : Optional[int] = {'tokenizer_file': 'tokenizer.json'}
_a : List[Any] = {
    'tokenizer_file': {
        'bigscience/tokenizer': 'https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json',
        'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json',
        'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json',
        'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json',
        'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json',
        'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json',
        'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json',
    },
}
class a_ ( a ):
    """Fast (tokenizers-backed) BLOOM tokenizer.

    Fixes to the scrambled original: `__init__` repeated the same parameter
    name many times (a SyntaxError), the pre-tokenizer rebuild assigned to
    throwaway locals, and all methods shared one scrambled name which broke
    the fast-tokenizer override contract (`_batch_encode_plus`, ...).

    NOTE(review): the base class `a` and the class-attribute constants
    (`VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP`) are defined elsewhere
    in the original project; they are referenced exactly as the source did.
    """

    A__ : Optional[int] = VOCAB_FILES_NAMES
    A__ : str = PRETRAINED_VOCAB_FILES_MAP
    A__ : Union[str, Any] = ['input_ids', 'attention_mask']
    A__ : str = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        # Parameter names recovered from how the body forwards them to super().
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        # Rebuild the serialized pre-tokenizer if it disagrees with the
        # requested `add_prefix_space`.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                ' pretokenized inputs.'
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                ' pretokenized inputs.'
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend tokenizer model files; return the written paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Flatten a `Conversation` into ids, keeping only the trailing
        `model_max_length` tokens."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 712 |
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def a_ ( __magic_name__ ) -> str:
    """Translate one ViT-MAE checkpoint parameter name into its HF name.

    Bug fix: the scrambled version stored each `replace` result in a throwaway
    local and returned the *unmodified* input; the result now feeds back into
    `name` so successive rules chain as intended.
    """
    name = __magic_name__
    if "cls_token" in name:
        name = name.replace('cls_token', 'vit.embeddings.cls_token')
    if "mask_token" in name:
        name = name.replace('mask_token', 'decoder.mask_token')
    if "decoder_pos_embed" in name:
        name = name.replace('decoder_pos_embed', 'decoder.decoder_pos_embed')
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace('pos_embed', 'vit.embeddings.position_embeddings')
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'vit.embeddings.patch_embeddings.projection')
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm', 'vit.embeddings.norm')
    if "decoder_blocks" in name:
        name = name.replace('decoder_blocks', 'decoder.decoder_layers')
    if "blocks" in name:
        name = name.replace('blocks', 'vit.encoder.layer')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "attn" in name:
        name = name.replace('attn', 'attention.self')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if "decoder_embed" in name:
        name = name.replace('decoder_embed', 'decoder.decoder_embed')
    if "decoder_norm" in name:
        name = name.replace('decoder_norm', 'decoder.decoder_norm')
    if "decoder_pred" in name:
        name = name.replace('decoder_pred', 'decoder.decoder_pred')
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace('norm.weight', 'vit.layernorm.weight')
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace('norm.bias', 'vit.layernorm.bias')
    return name
def a_ ( orig_state_dict , config ) -> dict:
    """Split fused `qkv` projections in a ViT-MAE checkpoint into separate
    query/key/value entries, in place, and return the dict.

    Fixes: the scrambled signature repeated `__magic_name__` for both
    parameters (a SyntaxError) and every dict write was lost to throwaway
    locals; target key names are reconstructed from the renamed prefixes the
    source still contained.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split('.')
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = 'decoder.decoder_layers.'
            else:
                dim = config.hidden_size
                prefix = 'vit.encoder.layer.'
            if "weight" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            # NOTE(review): the upstream conversion script renames `key` here
            # via its `rename_key` helper, whose name is scrambled in this
            # file; the key is kept as popped — confirm against upstream.
            orig_state_dict[key] = val
    return orig_state_dict

convert_state_dict = a_  # restore the name the checkpoint converter below calls
def a_ ( checkpoint_url , pytorch_dump_folder_path ) -> None:
    """Download a ViT-MAE checkpoint, convert it to HF format, sanity-check a
    few logits against known values, and save model + image processor.

    Fixes: the scrambled signature repeated `__magic_name__` (a SyntaxError)
    and every local binding was lost; names are recovered from the reads the
    body still performs (`checkpoint_url`, `pytorch_dump_folder_path`, ...).

    NOTE(review): `ViTMAEConfig`/`ViTMAEForPreTraining`/`ViTMAEImageProcessor`
    and `convert_state_dict` come from the surrounding project, referenced
    exactly as in the original.
    """
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1_280
        config.intermediate_size = 5_120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')['model']
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    new_state_dict = convert_state_dict(state_dict, config)
    model.load_state_dict(new_state_dict)
    model.eval()

    url = 'https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg'
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors='pt')

    # forward pass — seeded, because MAE's random masking affects the logits
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]])
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]])
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]])

    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

convert_vit_mae_checkpoint = a_  # restore the name the CLI entry point calls
if __name__ == "__main__":
    # CLI entry point. Fix: the scrambled source collapsed `parser` and `args`
    # into `_a`, leaving `parser.add_argument` and `args.checkpoint_url`
    # pointing at undefined names.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--checkpoint_url',
        default='https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth',
        type=str,
        help='URL of the checkpoint you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    args = parser.parse_args()
    convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 84 | 0 |
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
    from apex import amp
# NOTE(review): the `_a` bindings below are scrambled — on torch >= 1.6 the
# first was presumably a native-AMP availability flag, and the second the
# module-level `logger` that later code reads; confirm when unscrambling.
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'):
    _a : List[str] = True
    from torch.cuda.amp import autocast
_a : Optional[int] = logging.getLogger(__name__)
@dataclass
class a_ :
    """Model-selection and gumbel-temperature hyper-parameters for wav2vec2
    pretraining.

    NOTE(review): `default=a` references an undefined name (scrambled) — in
    the upstream script these defaults were `None`/`False`; confirm before
    running. Field names were likewise scrambled to `A__`.
    """

    A__ : str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    A__ : Optional[str] = field(
        default=a , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    A__ : Optional[bool] = field(
        default=a , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} )
    A__ : Optional[bool] = field(
        default=a , metadata={'help': 'Whether to log verbose messages or not.'} , )
    A__ : Optional[float] = field(
        default=2.0 , metadata={'help': 'Maximum temperature for gumbel softmax.'} )
    A__ : Optional[float] = field(
        default=0.5 , metadata={'help': 'Minimum temperature for gumbel softmax.'} )
    A__ : Optional[float] = field(
        default=0.999995 , metadata={'help': 'Decay of gumbel temperature during training.'} )
def a_ ( model_args , training_args ) -> None:
    """Configure logging format and per-process verbosity.

    Fix: the scrambled signature repeated `__magic_name__` for both
    parameters (a SyntaxError) while the body already read
    `model_args`/`training_args`; the chosen level was also assigned to a
    throwaway local instead of the value passed to `logger.setLevel`.
    """
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        # only the main process logs at INFO to avoid duplicated output
        logging_level = logging.INFO
    # NOTE(review): `logger` is the module-level logger, scrambled to `_a`
    # above — confirm the binding when unscrambling.
    logger.setLevel(logging_level)

configure_logger = a_  # restore the name `main()` calls
@dataclass
class a_ :
    """Arguments that control which dataset is used and how it is
    preprocessed for wav2vec2 pretraining.

    NOTE(review): `default=a` references an undefined name (scrambled) —
    upstream these defaults were `None`/`False`; confirm before running.
    """

    A__ : str = field(
        default=a , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
    A__ : Optional[str] = field(
        default=a , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
    A__ : Optional[str] = field(
        default='train' , metadata={
            'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
        } , )
    A__ : Optional[str] = field(
        default='validation' , metadata={
            'help': (
                'The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''
            )
        } , )
    A__ : Optional[str] = field(
        default='file' , metadata={'help': 'Column in the dataset that contains speech file path. Defaults to \'file\''} , )
    A__ : bool = field(
        default=a , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
    A__ : Optional[int] = field(
        default=1 , metadata={
            'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
        } , )
    A__ : Optional[int] = field(
        default=a , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
    A__ : Optional[float] = field(
        default=20.0 , metadata={'help': 'Filter audio files that are longer than `max_duration_in_seconds` seconds'} )
@dataclass
class a_ :
    """Data collator that pads raw wav2vec2 inputs into a batch and samples
    the time-mask indices used by the pretraining objective.

    NOTE(review): local names in `__call__` were scrambled to `snake_case`,
    and several statements were lost outright (e.g. the write that marks each
    sequence's last valid frame before the flip/cumsum trick). The code is
    kept byte-for-byte and only annotated, because the exact lost statements
    cannot be reconstructed with confidence — compare against the upstream
    wav2vec2 pretraining script before running.
    """

    A__ : WavaVecaForPreTraining
    A__ : WavaVecaFeatureExtractor
    A__ : Union[bool, str] = "longest"
    A__ : Optional[int] = None
    A__ : Optional[int] = None

    def __call__( self : List[str] , UpperCAmelCase__ : List[Dict[str, Union[List[int], torch.Tensor]]] ):
        """Pad the list of feature dicts into one tensor batch."""
        # was presumably: batch = self.feature_extractor.pad(...)
        snake_case : str = self.feature_extractor.pad(
            UpperCAmelCase__ , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
        # was presumably: mask_indices_seq_length = ... ; batch_size = ...
        snake_case : Optional[int] = self.model._get_feat_extract_output_lengths(batch['''input_values'''].shape[-1] )
        snake_case : Optional[Any] = batch['''input_values'''].shape[0]
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            snake_case : Optional[Any] = self.model._get_feat_extract_output_lengths(batch['''attention_mask'''].sum(-1 ) ).to(
                torch.long )
            snake_case : Dict = torch.zeros(
                (batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch['''input_values'''].device )
            # these two operations makes sure that all values
            # before the output lengths indices are attended to
            # NOTE(review): the `= 1` below lost its indexed left-hand side.
            snake_case : Optional[Any] = 1
            snake_case : List[Any] = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
            # sample randomly masked indices
            snake_case : Union[str, Any] = _compute_mask_indices(
                (batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=UpperCAmelCase__ , min_masks=2 , )
        return batch
class a_ ( a ):
    """Trainer subclass that decays the gumbel-softmax temperature after every
    update step.

    Fixes to the scrambled original: `__init__` repeated one keyword name
    (a SyntaxError) and assigned the hyper-parameters to throwaway locals;
    the step method was renamed back to `training_step`, the hook the HF
    `Trainer` loop actually invokes.

    NOTE(review): the base class `a` is scrambled — it is `Trainer` upstream.
    """

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        # Keyword names recovered from the attribute reads in training_step.
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model, inputs):
        """Run one forward/backward step (AMP / apex / deepspeed aware),
        decay the gumbel temperature, and return the detached loss."""
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            # DataParallel/deepspeed wrap the model, so config lives on .module
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs['mask_time_indices']).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
        return loss.detach()
def a_ ( ) -> Optional[Any]:
    """Entry point of the wav2vec2 pretraining example: parse arguments, load
    and preprocess the dataset, then build the collator/trainer and train.

    NOTE(review): local names in this block were scrambled to
    `snake_case`/`__magic_name__`, and several bindings (the argument-tuple
    unpack, the `DatasetDict` split writes, the nested functions' `batch`
    parameter) were lost outright; the `was:` comments record what each
    statement evidently did — confirm against the upstream script.
    """
    # was: parser = HfArgumentParser(...); model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    snake_case : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    snake_case : str = parser.parse_args_into_dataclasses()
    configure_logger(__magic_name__ , __magic_name__ )
    # Downloading and loading a dataset from the hub.
    snake_case : List[str] = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain"
        # was: datasets = DatasetDict(); datasets["validation"] = ...; datasets["train"] = ...
        snake_case : Dict = DatasetDict()
        snake_case : Any = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]" , cache_dir=model_args.cache_dir , )
        snake_case : Optional[int] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]" , cache_dir=model_args.cache_dir , )
    else:
        # make sure only "validation" and "train" keys remain"
        snake_case : Tuple = DatasetDict()
        snake_case : Union[str, Any] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split='''validation''' , cache_dir=model_args.cache_dir , )
        snake_case : List[Any] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}" , cache_dir=model_args.cache_dir , )
    # only normalized-inputs-training is supported
    snake_case : List[Any] = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=__magic_name__ )
    def prepare_dataset(__magic_name__ ):
        # check that all files have the correct sampling rate
        # was presumably: batch["speech"], _ = librosa.load(...)
        snake_case : List[str] = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
        return batch
    # load audio files into numpy arrays
    snake_case : Dict = datasets.map(
        __magic_name__ , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets['''train'''].column_names )
    # filter audio files that are too long
    snake_case : List[str] = vectorized_datasets.filter(
        lambda __magic_name__ : len(data['''speech'''] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
    def normalize(__magic_name__ ):
        return feature_extractor(batch['''speech'''] , sampling_rate=feature_extractor.sampling_rate )
    # normalize and transform to `BatchFeatures`
    snake_case : Dict = vectorized_datasets.map(
        __magic_name__ , batched=__magic_name__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets['''train'''].column_names , )
    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    snake_case : List[str] = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            '''PreTraining is only supported for ``config.do_stable_layer_norm=True`` and'''
            ''' ``config.feat_extract_norm=\'layer\'''' )
    # was: model = ...; data_collator = ...; trainer = ...
    snake_case : Optional[int] = WavaVecaForPreTraining(__magic_name__ )
    snake_case : Optional[int] = DataCollatorForWavaVecaPretraining(model=__magic_name__ , feature_extractor=__magic_name__ )
    snake_case : int = WavaVecaPreTrainer(
        model=__magic_name__ , data_collator=__magic_name__ , args=__magic_name__ , train_dataset=vectorized_datasets['''train'''] , eval_dataset=vectorized_datasets['''validation'''] , tokenizer=__magic_name__ , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
    trainer.train()
if __name__ == "__main__":
    # NOTE(review): `main` is this module's entry point, scrambled to `a_`.
    main()
| 713 |
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# Default batch sizes for the example; the scrambled `_a` bindings presumably
# correspond to the upstream MAX_GPU_BATCH_SIZE / EVAL_BATCH_SIZE constants —
# confirm against the reference accelerate example.
_a : Optional[Any] = 16
_a : Union[str, Any] = 32
def a_ ( accelerator , batch_size = 16 ):
    """Build the GLUE/MRPC train and eval dataloaders for the accelerate
    example.

    Fixes: the scrambled signature repeated `__magic_name__` for both
    parameters (a SyntaxError) while the body read `accelerator`/`batch_size`,
    and every local binding (tokenizer, datasets, pad sizes, dataloaders) was
    lost to throwaway names.
    """
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of
    # the dataset, starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=['idx', 'sentence1', 'sentence2'],
        )

    # We also rename the 'label' column to 'labels' which is the expected name
    # for labels by the models of the transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples,
            padding='longest',
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors='pt',
        )

    # Instantiate dataloaders. NOTE(review): the shuffle flags and the eval
    # batch size were lost to scrambling; upstream shuffles only the train
    # split and evaluates with a fixed EVAL_BATCH_SIZE — confirm.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader

get_dataloaders = a_  # restore the name the rest of the example uses
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders
    # NOTE(review): scrambled binding — upstream rebinds `get_dataloaders`
    # here so CI swaps in the mocked loaders; confirm when unscrambling.
    _a : Optional[int] = mocked_dataloaders  # noqa: F811
def a_ ( __magic_name__ , __magic_name__ ) -> Optional[Any]:
    # NOTE(review): obfuscation artifact — both parameters share the name
    # `__magic_name__` (a SyntaxError as written) and every result is bound to
    # a throwaway annotated `snake_case` local; presumably this was
    # `training_function(config, args)` from the accelerate examples — confirm.
    """Train and evaluate a GLUE/MRPC classifier, wrapping the loop in
    `find_executable_batch_size` so it is retried with a smaller batch size
    when the starting one fails."""
    # Shrink the epoch count when running under the mocked-dataloader test mode.
    if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __magic_name__ ) == "1":
        snake_case : Optional[int] = 2
    # Initialize accelerator
    snake_case : int = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    snake_case : Dict = config['''lr''']
    snake_case : Any = int(config['''num_epochs'''] )
    snake_case : List[str] = int(config['''seed'''] )
    snake_case : List[Any] = int(config['''batch_size'''] )
    snake_case : Tuple = evaluate.load('''glue''' , '''mrpc''' )
    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=__magic_name__ )
    def inner_training_loop(__magic_name__ ):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()
        # Then we can declare the model, optimizer, and everything else:
        set_seed(__magic_name__ )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        snake_case : str = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__magic_name__ )
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        snake_case : Optional[int] = model.to(accelerator.device )
        # Instantiate optimizer
        snake_case : Optional[int] = AdamW(params=model.parameters() , lr=__magic_name__ )
        snake_case , snake_case : List[Any] = get_dataloaders(__magic_name__ , __magic_name__ )
        # Instantiate scheduler
        snake_case : int = get_linear_schedule_with_warmup(
            optimizer=__magic_name__ , num_warmup_steps=100 , num_training_steps=(len(__magic_name__ ) * num_epochs) , )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        snake_case , snake_case , snake_case , snake_case , snake_case : Tuple = accelerator.prepare(
            __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
        # Now we train the model
        for epoch in range(__magic_name__ ):
            model.train()
            for step, batch in enumerate(__magic_name__ ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                snake_case : int = model(**__magic_name__ )
                snake_case : Optional[int] = outputs.loss
                accelerator.backward(__magic_name__ )
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(__magic_name__ ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                with torch.no_grad():
                    snake_case : List[str] = model(**__magic_name__ )
                snake_case : List[Any] = outputs.logits.argmax(dim=-1 )
                snake_case , snake_case : Dict = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
                metric.add_batch(
                    predictions=__magic_name__ , references=__magic_name__ , )
            snake_case : Tuple = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(F"epoch {epoch}:" , __magic_name__ )
    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def a_ ( ) -> Union[str, Any]:
    """Parse command-line options, build the hyper-parameter config and launch training.

    Fix: the obfuscated original passed ``type=__magic_name__`` and
    ``default=__magic_name__`` — a name that does not exist at module scope, so
    calling this function raised ``NameError``. The choices are strings, so the
    argument type is ``str``; ``None`` as the default lets the accelerator pick
    its own mixed-precision setting.
    """
    snake_case : int = argparse.ArgumentParser(description='''Simple example of training script.''' )
    snake_case.add_argument(
        '''--mixed_precision''' ,
        type=str ,  # choices below are string literals
        default=None ,  # no explicit value -> accelerator default
        choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] ,
        help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''' , )
    snake_case.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
    snake_case_args : Optional[Any] = snake_case.parse_args()
    snake_case_config : Optional[int] = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    # NOTE(review): `training_function` is not defined under that name in this
    # module as shown (the trainer above was renamed by the obfuscator) —
    # confirm the intended callee.
    training_function(snake_case_config , snake_case_args )
# Standard script entry point: run the CLI driver only when executed directly.
if __name__ == "__main__":
    main()
| 84 | 0 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class a_ ( a ):
    # NOTE(review): obfuscation artifacts throughout this test class — every
    # helper is named `lowerCAmelCase` (later defs shadow earlier ones at class
    # creation), results are bound to throwaway `snake_case` locals, and
    # several bodies reference `UpperCAmelCase__` with no such parameter in
    # scope. The intended originals are the RealmRetriever test fixtures
    # (setUp / get_tokenizer / tearDown / get_config / dummy dataset / dummy
    # block records / dummy retriever / three test methods) — confirm against
    # the upstream transformers test file.
    def lowerCAmelCase( self : List[Any] ):
        """Create a temp dir holding a toy WordPiece vocab and a block-records dir."""
        snake_case : List[Any] = tempfile.mkdtemp()
        snake_case : Dict = 5
        # Realm tok
        snake_case : str = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''test''',
            '''question''',
            '''this''',
            '''is''',
            '''the''',
            '''first''',
            '''second''',
            '''third''',
            '''fourth''',
            '''fifth''',
            '''record''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        snake_case : Tuple = os.path.join(self.tmpdirname , '''realm_tokenizer''' )
        os.makedirs(UpperCAmelCase__ , exist_ok=UpperCAmelCase__ )
        snake_case : Any = os.path.join(UpperCAmelCase__ , VOCAB_FILES_NAMES['''vocab_file'''] )
        # Write one vocab token per line, the format RealmTokenizer expects.
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        snake_case : Tuple = os.path.join(self.tmpdirname , '''realm_block_records''' )
        os.makedirs(UpperCAmelCase__ , exist_ok=UpperCAmelCase__ )
    def lowerCAmelCase( self : List[Any] ):
        """Load a RealmTokenizer from the temp vocab written in setup."""
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer''' ) )
    def lowerCAmelCase( self : Any ):
        """Remove the temp dir created in setup."""
        shutil.rmtree(self.tmpdirname )
    def lowerCAmelCase( self : Optional[int] ):
        """Return a RealmConfig sized to the dummy block records."""
        snake_case : Any = RealmConfig(num_block_records=self.num_block_records )
        return config
    def lowerCAmelCase( self : int ):
        """Return a tiny two-row QA dataset (unused fields aside, id/question/answers)."""
        snake_case : Optional[int] = Dataset.from_dict(
            {
                '''id''': ['''0''', '''1'''],
                '''question''': ['''foo''', '''bar'''],
                '''answers''': [['''Foo''', '''Bar'''], ['''Bar''']],
            } )
        return dataset
    def lowerCAmelCase( self : str ):
        """Return a numpy array of byte-string evidence blocks."""
        # NOTE(review): `dtype=UpperCAmelCase__` has no matching parameter here;
        # the upstream test passes `dtype=object` — confirm.
        snake_case : Dict = np.array(
            [
                b'''This is the first record''',
                b'''This is the second record''',
                b'''This is the third record''',
                b'''This is the fourth record''',
                b'''This is the fifth record''',
                b'''This is a longer longer longer record''',
            ] , dtype=UpperCAmelCase__ , )
        return block_records
    def lowerCAmelCase( self : Optional[Any] ):
        """Assemble a RealmRetriever over the dummy blocks and toy tokenizer."""
        snake_case : Tuple = RealmRetriever(
            block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
        return retriever
    def lowerCAmelCase( self : Optional[Any] ):
        """Retrieve two blocks for one question and check tensor shapes and detokenized text."""
        snake_case : List[str] = self.get_config()
        snake_case : Optional[Any] = self.get_dummy_retriever()
        snake_case : Optional[int] = retriever.tokenizer
        snake_case : Dict = np.array([0, 3] , dtype='''long''' )
        snake_case : Optional[int] = tokenizer(['''Test question'''] ).input_ids
        snake_case : Union[str, Any] = tokenizer(
            ['''the fourth'''] , add_special_tokens=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , ).input_ids
        snake_case : Optional[Any] = config.reader_seq_len
        snake_case : List[str] = retriever(
            UpperCAmelCase__ , UpperCAmelCase__ , answer_ids=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors='''np''' )
        self.assertEqual(len(UpperCAmelCase__ ) , 2 )
        self.assertEqual(len(UpperCAmelCase__ ) , 2 )
        self.assertEqual(len(UpperCAmelCase__ ) , 2 )
        # Two retrieved blocks, each padded/truncated to length 10.
        self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
        self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
        self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
        self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , )
    def lowerCAmelCase( self : Optional[Any] ):
        """Check answer-span detection across three retrieved blocks."""
        snake_case : List[Any] = self.get_config()
        snake_case : Optional[int] = self.get_dummy_retriever()
        snake_case : List[str] = retriever.tokenizer
        snake_case : Optional[Any] = np.array([0, 3, 5] , dtype='''long''' )
        snake_case : Optional[int] = tokenizer(['''Test question'''] ).input_ids
        snake_case : Any = tokenizer(
            ['''the fourth''', '''longer longer'''] , add_special_tokens=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , ).input_ids
        snake_case : List[Any] = config.reader_seq_len
        snake_case : Union[str, Any] = retriever(
            UpperCAmelCase__ , UpperCAmelCase__ , answer_ids=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors='''np''' )
        # Only the 2nd and 3rd blocks contain an answer; -1 marks "no span".
        self.assertEqual([False, True, True] , UpperCAmelCase__ )
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , UpperCAmelCase__ )
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , UpperCAmelCase__ )
    def lowerCAmelCase( self : Optional[int] ):
        """Round-trip save_pretrained/from_pretrained, locally and via a mocked hub download."""
        snake_case : int = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
        # Test local path
        snake_case : Optional[Any] = retriever.from_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
        self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
        # Test mocked remote path
        with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''' ) as mock_hf_hub_download:
            snake_case : Any = os.path.join(
                os.path.join(self.tmpdirname , '''realm_block_records''' ) , _REALM_BLOCK_RECORDS_FILENAME )
            snake_case : Any = RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''' )
        self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
| 714 |
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
_a : Dict = logging.get_logger(__name__)
def a_ ( box , width , height ) -> list:
    """Scale a pixel-space ``(left, top, right, bottom)`` box to the 0-1000 range.

    Fix: the obfuscated original declared all three parameters with the same
    name (``__magic_name__``), which is a SyntaxError, and annotated the return
    with ``List[Any]`` although ``List`` is not imported in this module.

    Args:
        box: sequence of four pixel coordinates ``(left, top, right, bottom)``.
        width: image width in pixels (divides the x coordinates).
        height: image height in pixels (divides the y coordinates).

    Returns:
        A list of four ints, each coordinate mapped into ``[0, 1000]``.
    """
    return [
        int(1_000 * (box[0] / width) ),
        int(1_000 * (box[1] / height) ),
        int(1_000 * (box[2] / width) ),
        int(1_000 * (box[3] / height) ),
    ]
def a_ ( image , lang , tesseract_config = None ):
    """Run Tesseract OCR on ``image`` and return ``(words, normalized_boxes)``.

    Each box is ``(left, top, right, bottom)`` scaled into the 0-1000 range
    expected by LayoutLM-style models.

    Fixes over the obfuscated original:
    * all three parameters shared one name (a SyntaxError) — real names restored;
    * the body called ``normalize_box``, which is undefined here (the
      module-level normalizer was renamed ``a_`` and is shadowed by this very
      function), so the scaling is inlined below;
    * the ``-> str`` annotation was wrong — the function returns a 2-tuple;
    * index filtering now uses a set for O(1) membership tests.

    Args:
        image: image accepted by ``to_pil_image`` (PIL image or array).
        lang: language hint forwarded to Tesseract (may be ``None``).
        tesseract_config: extra Tesseract CLI flags; ``None`` means no flags.
    """
    snake_case : Any = tesseract_config if tesseract_config is not None else ''''''
    # apply OCR
    snake_case_image = to_pil_image(image )
    image_width , image_height = snake_case_image.size
    snake_case_data = pytesseract.image_to_data(snake_case_image , lang=lang , output_type='''dict''' , config=snake_case )
    words , left , top , box_width , box_height = (
        snake_case_data['''text'''],
        snake_case_data['''left'''],
        snake_case_data['''top'''],
        snake_case_data['''width'''],
        snake_case_data['''height'''],
    )
    # filter empty words and corresponding coordinates
    irrelevant_indices = {idx for idx, word in enumerate(words ) if not word.strip()}
    words = [word for idx, word in enumerate(words ) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left ) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top ) if idx not in irrelevant_indices]
    box_width = [coord for idx, coord in enumerate(box_width ) if idx not in irrelevant_indices]
    box_height = [coord for idx, coord in enumerate(box_height ) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left , top , box_width , box_height ):
        actual_boxes.append([x, y, x + w, y + h] )
    # finally, normalize the bounding boxes (inlined; see docstring)
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(
            [
                int(1_000 * (box[0] / image_width) ),
                int(1_000 * (box[1] / image_height) ),
                int(1_000 * (box[2] / image_width) ),
                int(1_000 * (box[3] / image_height) ),
            ] )
    assert len(words ) == len(normalized_boxes ), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class a_ ( a ):
    # NOTE(review): obfuscation artifacts — every keyword parameter is named
    # `UpperCAmelCase__` (duplicate parameter names are a SyntaxError) and
    # results go to throwaway `snake_case` locals while later lines read
    # `do_resize`, `size`, etc. Presumably this is a LayoutLM-family image
    # processor (resize + optional Tesseract OCR + RGB->BGR flip) — confirm
    # against the upstream transformers source.
    # Model-input key produced by preprocessing.
    A__ : int = ['pixel_values']
    def __init__( self : Optional[int] , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional[str] = "" , **UpperCAmelCase__ : int , ):
        """Store resize/OCR defaults; size defaults to 224x224."""
        super().__init__(**UpperCAmelCase__ )
        snake_case : Any = size if size is not None else {'''height''': 224, '''width''': 224}
        snake_case : Tuple = get_size_dict(UpperCAmelCase__ )
        snake_case : Dict = do_resize
        snake_case : str = size
        snake_case : Optional[int] = resample
        snake_case : Union[str, Any] = apply_ocr
        snake_case : int = ocr_lang
        snake_case : Union[str, Any] = tesseract_config
    def lowerCAmelCase( self : List[str] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Dict[str, int] , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Any , ):
        """Resize an image to the (height, width) given in `size`."""
        snake_case : Dict = get_size_dict(UpperCAmelCase__ )
        if "height" not in size or "width" not in size:
            raise ValueError(F"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}" )
        snake_case : Tuple = (size['''height'''], size['''width'''])
        return resize(UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
    def lowerCAmelCase( self : Tuple , UpperCAmelCase__ : ImageInput , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : PILImageResampling = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase__ : List[Any] , ):
        """Preprocess images: validate, optionally OCR, resize, flip RGB->BGR, batch."""
        # Per-call overrides fall back to the instance defaults set in __init__.
        snake_case : Tuple = do_resize if do_resize is not None else self.do_resize
        snake_case : List[Any] = size if size is not None else self.size
        snake_case : Tuple = get_size_dict(UpperCAmelCase__ )
        snake_case : str = resample if resample is not None else self.resample
        snake_case : Optional[int] = apply_ocr if apply_ocr is not None else self.apply_ocr
        snake_case : Any = ocr_lang if ocr_lang is not None else self.ocr_lang
        snake_case : Optional[int] = tesseract_config if tesseract_config is not None else self.tesseract_config
        snake_case : List[str] = make_list_of_images(UpperCAmelCase__ )
        if not valid_images(UpperCAmelCase__ ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )
        # All transformations expect numpy arrays.
        snake_case : Any = [to_numpy_array(UpperCAmelCase__ ) for image in images]
        if apply_ocr:
            requires_backends(self , '''pytesseract''' )
            snake_case : Optional[int] = []
            snake_case : Union[str, Any] = []
            for image in images:
                snake_case , snake_case : List[Any] = apply_tesseract(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
                words_batch.append(UpperCAmelCase__ )
                boxes_batch.append(UpperCAmelCase__ )
        if do_resize:
            snake_case : Any = [self.resize(image=UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ ) for image in images]
        # flip color channels from RGB to BGR (as Detectron2 requires this)
        snake_case : int = [flip_channel_order(UpperCAmelCase__ ) for image in images]
        snake_case : Optional[Any] = [to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__ ) for image in images]
        snake_case : List[Any] = BatchFeature(data={'''pixel_values''': images} , tensor_type=UpperCAmelCase__ )
        if apply_ocr:
            snake_case : Dict = words_batch
            snake_case : Dict = boxes_batch
        return data
| 84 | 0 |
'''simple docstring'''
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
# Emit INFO-level logs so each converted weight is reported during conversion.
logging.set_verbosity_info()
_a : List[Any] = logging.get_logger(__name__)
# NOTE(review): `_a` is rebound three times in this section (logger, mapping,
# key list) — obfuscation artifact. Later code reads `logger` and `MAPPING`;
# presumably these were the original names — confirm.
# Mapping from fairseq wav2vec2 parameter-name fragments to their HF
# counterparts; '*' stands for a layer index filled in during conversion.
_a : Union[str, Any] = {
    'post_extract_proj': 'feature_projection.projection',
    'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
    'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
    'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
    'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
    'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
    'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
    'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
    'fc2': 'encoder.layers.*.feed_forward.output_dense',
    'final_layer_norm': 'encoder.layers.*.final_layer_norm',
    'encoder.layer_norm': 'encoder.layer_norm',
    'w2v_model.layer_norm': 'feature_projection.layer_norm',
    'quantizer.weight_proj': 'quantizer.weight_proj',
    'quantizer.vars': 'quantizer.codevectors',
    'project_q': 'project_q',
    'final_proj': 'project_hid',
    'w2v_encoder.proj': 'lm_head',
    'mask_emb': 'masked_spec_embed',
}
# Keys that sit at the top level of the HF model rather than under the encoder.
_a : Tuple = [
    'lm_head',
    'quantizer.weight_proj',
    'quantizer.codevectors',
    'project_q',
    'project_hid',
]
def a_ ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Any:
    # NOTE(review): obfuscation artifacts — all five parameters share one name
    # (a SyntaxError) and each branch assigns to a throwaway local instead of
    # `hf_pointer.<attr>.data`. Presumably this was
    # `set_recursively(hf_pointer, key, value, full_name, weight_type)` from
    # the upstream wav2vec2 conversion script — confirm.
    """Copy one fairseq tensor into the HF module located by dotted `key`,
    checking shapes and logging the assignment."""
    # Walk the dotted key down from the HF root module.
    for attribute in key.split('''.''' ):
        snake_case : int = getattr(__magic_name__ , __magic_name__ )
    # Shape check against the destination parameter (or the module itself).
    if weight_type is not None:
        snake_case : Optional[int] = getattr(__magic_name__ , __magic_name__ ).shape
    else:
        snake_case : List[Any] = hf_pointer.shape
    assert hf_shape == value.shape, (
        F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        F" {value.shape} for {full_name}"
    )
    # Dispatch on which parameter slot of the destination module to fill.
    if weight_type == "weight":
        snake_case : str = value
    elif weight_type == "weight_g":
        snake_case : List[Any] = value
    elif weight_type == "weight_v":
        snake_case : Tuple = value
    elif weight_type == "bias":
        snake_case : List[Any] = value
    else:
        snake_case : Union[str, Any] = value
    logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def a_ ( __magic_name__ , __magic_name__ ) -> List[Any]:
    # NOTE(review): obfuscation artifacts — both parameters share one name and
    # results are bound to throwaway locals; presumably
    # `recursively_load_weights(fairseq_model, hf_model)` — confirm.
    """Route every fairseq state-dict tensor to the matching HF submodule:
    conv feature extractor, adapter, or a MAPPING-matched encoder weight."""
    snake_case : Dict = []
    snake_case : Optional[Any] = fairseq_model.state_dict()
    snake_case : Optional[Any] = hf_model.feature_extractor
    snake_case : Tuple = hf_model.adapter
    for name, value in fairseq_dict.items():
        snake_case : Tuple = False
        if "conv_layers" in name:
            load_conv_layer(
                __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , hf_model.config.feat_extract_norm == '''group''' , )
            snake_case : Optional[Any] = True
        elif any(x in name for x in ['''adaptor''', '''w2v_encoder.proj.''', '''w2v_proj_ln.'''] ):
            load_adapter(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
            snake_case : Any = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
                    snake_case : Tuple = True
                    # '*' in the mapped key is replaced by the layer index
                    # parsed out of the fairseq name.
                    if "*" in mapped_key:
                        snake_case : Optional[int] = name.split(__magic_name__ )[0].split('''.''' )[-2]
                        snake_case : Dict = mapped_key.replace('''*''' , __magic_name__ )
                    # Classify which parameter slot the tensor belongs to.
                    if "weight_g" in name:
                        snake_case : Tuple = '''weight_g'''
                    elif "weight_v" in name:
                        snake_case : Tuple = '''weight_v'''
                    elif "bias" in name:
                        snake_case : Union[str, Any] = '''bias'''
                    elif "weight" in name:
                        snake_case : Union[str, Any] = '''weight'''
                    else:
                        snake_case : List[Any] = None
                    set_recursively(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
                continue
        if not is_used:
            unused_weights.append(__magic_name__ )
    logger.warning(F"Unused weights: {unused_weights}" )
def a_ ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[Any]:
    # NOTE(review): obfuscation artifacts — five parameters share one name and
    # assignments go to throwaway locals; presumably
    # `load_conv_layer(full_name, value, feature_extractor, unused_weights,
    # use_group_norm)` — confirm.
    """Copy one fairseq conv-feature-extractor tensor (conv weight/bias or
    layer-norm weight/bias) into the HF feature extractor, with shape asserts."""
    # fairseq names look like "...conv_layers.<layer>.<type>...".
    snake_case : Optional[Any] = full_name.split('''conv_layers.''' )[-1]
    snake_case : Tuple = name.split('''.''' )
    snake_case : List[Any] = int(items[0] )
    snake_case : int = int(items[1] )
    # type_id 0 -> conv parameters; type_id 2 -> layer-norm parameters
    # (group-norm only exists on layer 0).
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F"{full_name} has size {value.shape}, but"
                F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            snake_case : int = value
            logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F"{full_name} has size {value.shape}, but"
                F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            snake_case : List[Any] = value
            logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            snake_case : List[str] = value
            logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F"{full_name} has size {value.shape}, but"
                F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            snake_case : Any = value
            logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
    else:
        unused_weights.append(__magic_name__ )
def a_ ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Tuple:
    # NOTE(review): obfuscation artifacts — four parameters share one name and
    # assignments go to throwaway locals; presumably
    # `load_adapter(full_name, value, adapter, unused_weights)` — confirm.
    """Copy one fairseq adapter tensor (projection, its layer norm, or a
    per-layer conv) into the HF adapter module, with shape asserts."""
    snake_case : Any = full_name.split('''adaptor.''' )[-1]
    snake_case : Union[str, Any] = name.split('''.''' )
    # Second path component is the adapter layer index when it is numeric.
    if items[1].isdigit():
        snake_case : Tuple = int(items[1] )
    else:
        snake_case : int = None
    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), F"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
                snake_case : Dict = value
                logger.info(F"Adapter proj layer norm bias was initialized from {full_name}." )
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), F"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
                snake_case : int = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), F"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
                snake_case : List[str] = value
                logger.info(F"Adapter proj layer bias was initialized from {full_name}." )
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), F"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
                snake_case : Optional[Any] = value
                logger.info(F"Adapter proj layer weight was initialized from {full_name}." )
    elif isinstance(__magic_name__ , __magic_name__ ):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), F"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
            snake_case : List[Any] = value
            logger.info(F"Adapter layer {layer_id} bias was initialized from {full_name}." )
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), F"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
            snake_case : int = value
            logger.info(F"Adapter layer {layer_id} bias was initialized from {full_name}." )
    else:
        unused_weights.append(__magic_name__ )
def a_ ( __magic_name__ ):
    """Build a bias-free ``nn.Linear`` whose weight tensor is the embedding's.

    Fixes over the obfuscated original: the body referenced ``emb`` although
    the parameter is named ``__magic_name__`` (NameError); the shape was bound
    to a single variable instead of unpacked into (vocab_size, emb_size); and
    ``nn.Linear`` received the embedding module itself for all three
    arguments. The ``Optional[Any]`` return annotation also used names not
    imported in this module, so it is dropped.

    Args:
        __magic_name__: an ``nn.Embedding`` whose weights become the layer's.

    Returns:
        ``nn.Linear(vocab_size, emb_size, bias=False)`` sharing the embedding
        weight data (the standard LM-head tying used by HF conversion scripts).
    """
    emb = __magic_name__  # bind the parameter to the name the body uses
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def a_ ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ) -> Any:
    # NOTE(review): obfuscation artifacts — eleven parameters share one name
    # (a SyntaxError) and results go to throwaway locals; presumably this is
    # `convert_wav2vec2_checkpoint(checkpoint_path, pytorch_dump_folder_path,
    # dict_path, config_yaml_path, encoder_config_path, decoder_config_path,
    # add_adapter, adapter_kernel_size, adapter_stride,
    # decoder_start_token_id, encoder_output_dim)` per the argparse block
    # below — confirm.
    """Convert a fairseq wav2vec2+mBART speech-translation checkpoint into an
    HF SpeechEncoderDecoderModel and save model, tokenizer and extractor."""
    # Build encoder (wav2vec2 + adapter) and decoder (mBART) configs.
    snake_case : int = WavaVecaConfig.from_pretrained(
        __magic_name__ , add_adapter=__magic_name__ , adapter_stride=__magic_name__ , adapter_kernel_size=__magic_name__ , use_auth_token=__magic_name__ , output_hidden_size=__magic_name__ , )
    snake_case : Union[str, Any] = MBartConfig.from_pretrained(__magic_name__ )
    # load model
    snake_case : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={
            '''config_yaml''': config_yaml_path,
            '''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ),
            '''w2v_path''': checkpoint_path,
            '''load_pretrained_decoder_from''': None,
        } , )
    snake_case : List[Any] = model[0].eval()
    # load feature extractor
    snake_case : str = WavaVecaFeatureExtractor.from_pretrained(__magic_name__ , use_auth_token=__magic_name__ )
    # set weights for wav2vec2 encoder
    snake_case : Dict = WavaVecaModel(__magic_name__ )
    recursively_load_weights_wavaveca(model.encoder , __magic_name__ )
    # load decoder weights
    snake_case : Optional[int] = MBartForCausalLM(__magic_name__ )
    snake_case : Tuple = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=__magic_name__ )
    logger.warning(F"The following keys are missing when loading the decoder weights: {missing_keys}" )
    logger.warning(F"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" )
    snake_case : Optional[Any] = SpeechEncoderDecoderModel(encoder=__magic_name__ , decoder=__magic_name__ )
    snake_case : Union[str, Any] = False
    snake_case : Union[str, Any] = MBartaaTokenizer(__magic_name__ )
    tokenizer.save_pretrained(__magic_name__ )
    # Propagate token ids and model-type tags into the combined config.
    snake_case : List[Any] = hf_wavavec.config.to_dict()
    snake_case : List[Any] = tokenizer.pad_token_id
    snake_case : Dict = tokenizer.bos_token_id
    snake_case : List[str] = tokenizer.eos_token_id
    snake_case : str = '''mbart50'''
    snake_case : Optional[int] = '''wav2vec2'''
    snake_case : str = tokenizer.eos_token_id
    snake_case : Optional[int] = 250_004
    snake_case : Union[str, Any] = tokenizer.eos_token_id
    snake_case : List[Any] = SpeechEncoderDecoderConfig.from_dict(__magic_name__ )
    hf_wavavec.save_pretrained(__magic_name__ )
    feature_extractor.save_pretrained(__magic_name__ )
# CLI entry point for the checkpoint conversion.
# NOTE(review): obfuscation artifacts — the parser is bound to `_a` but the
# `add_argument` calls use `parser`, and `convert_wavaveca_checkpoint` is not
# defined under that name in this module (the converter above was renamed
# `a_`) — confirm the intended names.
if __name__ == "__main__":
    _a : Tuple = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
    parser.add_argument('--config_yaml_path', default=None, type=str, help='Path to yaml file of fine-tuned model')
    parser.add_argument(
        '--encoder_config_path',
        default='facebook/wav2vec2-xls-r-1b',
        type=str,
        help='Path to hf encoder wav2vec2 checkpoint config',
    )
    parser.add_argument(
        '--decoder_config_path',
        default='facebook/mbart-large-50-one-to-many-mmt',
        type=str,
        help='Path to hf decoder checkpoint config',
    )
    parser.add_argument('--add_adapter', default=True, type=bool, help='whethere to add model adapter layers')
    parser.add_argument('--adapter_stride', default=2, type=int, help='stride of adapter layers')
    parser.add_argument('--adapter_kernel_size', default=3, type=int, help='kernel size of adapter layers')
    parser.add_argument('--encoder_output_dim', default=1_024, type=int, help='encoder output dim')
    parser.add_argument('--start_token_id', default=250_004, type=int, help='`decoder_start_token_id` of model config')
    _a : List[Any] = parser.parse_args()
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        args.config_yaml_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        add_adapter=args.add_adapter,
        adapter_kernel_size=args.adapter_kernel_size,
        adapter_stride=args.adapter_stride,
        decoder_start_token_id=args.start_token_id,
        encoder_output_dim=args.encoder_output_dim,
    )
| 715 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class a_ :
    # NOTE(review): obfuscation artifacts — all __init__ keyword parameters
    # share the name `UpperCAmelCase__` (duplicate parameter names are a
    # SyntaxError), assignments go to throwaway `snake_case` locals while
    # later lines read `self.batch_size` etc., and the three helper methods
    # all share the name `lowerCAmelCase`. Presumably this is the AST model
    # tester (config/inputs factory) from the transformers test suite —
    # confirm against upstream.
    def __init__( self : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any]=13 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : List[Any]=24 , UpperCAmelCase__ : Union[str, Any]=16 , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : str=True , UpperCAmelCase__ : int=32 , UpperCAmelCase__ : Tuple=5 , UpperCAmelCase__ : Dict=4 , UpperCAmelCase__ : Optional[int]=37 , UpperCAmelCase__ : Optional[int]="gelu" , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Optional[int]=10 , UpperCAmelCase__ : Any=0.02 , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : List[Any]=2 , UpperCAmelCase__ : Optional[Any]=2 , ):
        """Store hyper-parameters and derive the expected patch sequence length."""
        snake_case : Tuple = parent
        snake_case : Dict = batch_size
        snake_case : str = patch_size
        snake_case : Union[str, Any] = max_length
        snake_case : str = num_mel_bins
        snake_case : Any = is_training
        snake_case : Union[str, Any] = use_labels
        snake_case : Tuple = hidden_size
        snake_case : Dict = num_hidden_layers
        snake_case : Any = num_attention_heads
        snake_case : Any = intermediate_size
        snake_case : List[Any] = hidden_act
        snake_case : str = hidden_dropout_prob
        snake_case : str = attention_probs_dropout_prob
        snake_case : str = type_sequence_label_size
        snake_case : Optional[int] = initializer_range
        snake_case : str = scope
        snake_case : int = frequency_stride
        snake_case : Union[str, Any] = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        snake_case : Optional[int] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        snake_case : Any = (self.max_length - self.patch_size) // self.time_stride + 1
        snake_case : Union[str, Any] = frequency_out_dimension * time_out_dimension
        snake_case : Union[str, Any] = num_patches + 2
    def lowerCAmelCase( self : Union[str, Any] ):
        """Build random spectrogram inputs (and labels, if enabled) plus a config."""
        snake_case : Optional[int] = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
        snake_case : str = None
        if self.use_labels:
            snake_case : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        snake_case : List[str] = self.get_config()
        return config, input_values, labels
    def lowerCAmelCase( self : Any ):
        """Assemble an ASTConfig from the stored hyper-parameters."""
        return ASTConfig(
            patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
    def lowerCAmelCase( self : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int] ):
        """Run ASTModel in eval mode and check the hidden-state output shape."""
        snake_case : str = ASTModel(config=UpperCAmelCase__ )
        model.to(UpperCAmelCase__ )
        model.eval()
        snake_case : Any = model(UpperCAmelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def lowerCAmelCase( self : Optional[Any] ):
        """Repackage prepared config/inputs into the dict the common tests expect."""
        snake_case : int = self.prepare_config_and_inputs()
        (
            (
                snake_case
            ) , (
                snake_case
            ) , (
                snake_case
            ) ,
        ) : int = config_and_inputs
        snake_case : Tuple = {'''input_values''': input_values}
        return config, inputs_dict
@require_torch
class a_ ( a , a , unittest.TestCase ):
    """Model-level tests for AST, driven by the common test mixins.

    NOTE(review): the mangled source named every class attribute `A__` and every
    method `lowerCAmelCase` (so later defs shadowed earlier ones); the names
    below are the ones the ModelTesterMixin/PipelineTesterMixin machinery and
    unittest actually look up.
    """

    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # The audio-classification pipeline tests are skipped for this model.
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        # NOTE(review): config_class/has_text_modality were mangled; ASTConfig/False restored per upstream test.
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def a_ ( ):
    """Download a sample FLAC clip from the Hub and load it with torchaudio.

    Returns the ``(waveform, sampling_rate)`` pair produced by
    ``torchaudio.load``.  (The mangled source bound the pair to a throwaway
    local and then returned two names that were never defined.)
    """
    filepath = hf_hub_download(
        repo_id='''nielsr/audio-spectogram-transformer-checkpoint''', filename='''sample_audio.flac''', repo_type='''dataset''' )
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate
@require_torch
@require_torchaudio
class a_ ( unittest.TestCase ):
    """Slow integration test: run the fine-tuned AudioSet checkpoint on a real clip."""

    @cached_property
    def default_feature_extractor(self):
        # None when torchaudio is unavailable so the test can bail out gracefully.
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        # NOTE(review): `torch_device` and `prepare_audio` were mangled away in
        # this file — they come from transformers.testing_utils and the helper
        # function defined just above; confirm both names resolve.
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)

        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits (527 AudioSet classes)
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 84 | 0 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_a : Union[str, Any] = logging.getLogger(__name__)
def a_ ( preds , labels ) -> float:
    """Element-wise accuracy: the fraction of `preds` equal to `labels`.

    Both arguments are numpy arrays of the same shape.  (The mangled source
    declared both parameters with the same name, which is a SyntaxError.)
    """
    return (preds == labels).mean()
@dataclass
class a_ :
    """Arguments pertaining to which model/config/tokenizer we fine-tune from.

    Field names are restored from the mangled `A__` placeholders to the names
    `main()` reads (model_name_or_path, config_name, tokenizer_name,
    cache_dir); `default=a` was a NameError and is restored to None.
    """

    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class a_ :
    """Arguments pertaining to what data we feed the model for training/eval.

    Field names restored to the ones `main()` reads (task_name, data_dir,
    max_seq_length, overwrite_cache); the mangled `default=a` for
    overwrite_cache was a NameError and is restored to False.
    """

    task_name: str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys() )} )
    data_dir: str = field(metadata={'help': 'Should contain the data files for the task.'} )
    max_seq_length: int = field(
        default=128 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def a_ ( ) -> Dict:
    """Train and/or evaluate a multiple-choice model with the HF Trainer.

    Parses ModelArguments, DataTrainingArguments and TrainingArguments from the
    command line, builds the model/tokenizer/datasets for the requested task,
    and returns the dict of evaluation metrics (empty when --do_eval is off).
    """
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    # The mangled source collapsed this 3-tuple into one name and then read
    # `model_args`/`data_args`/`training_args`, which were never bound.
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            ''' --overwrite_output_dir to overcome.''' )

    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''',
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,  # was `fpaa` in the mangled source; TrainingArguments exposes `fp16`
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''', training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError('''Task not found: %s''' % (data_args.task_name))

    # Load pretrained model and tokenizer.  The .from_pretrained methods
    # guarantee only one local process concurrently downloads model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool('''.ckpt''' in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p) -> Dict:
        # p is an EvalPrediction; pick the highest-scoring choice per example.
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator: pad to multiples of 8 for tensor-core efficiency under fp16.
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''')

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, '''eval_results.txt''')
        if trainer.is_world_master():
            with open(output_eval_file, '''w''') as writer:
                logger.info('''***** Eval results *****''')
                for key, value in result.items():
                    logger.info(''' %s = %s''', key, value)
                    writer.write('''%s = %s\n''' % (key, value))

            results.update(result)

    return results
def a_ ( __magic_name__ ) -> List[Any]:
    """Entry point for torch_xla's xla_spawn; the spawned-process index argument is ignored.

    NOTE(review): `main` is not defined in this file (the training entry point
    above was mangled to `a_`), so this call raises NameError — confirm the
    intended target before use.
    """
    main()
# Standard script entry point.
if __name__ == "__main__":
    # NOTE(review): same undefined-`main` issue as above.
    main()
| 716 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_a : Union[str, Any] = logging.getLogger(__name__)
def a_ ( preds , labels ) -> float:
    """Return the mean of element-wise equality between `preds` and `labels`.

    Expects equal-shaped numpy arrays.  (Both parameters shared one name in
    the mangled source — a SyntaxError — so they are renamed here.)
    """
    return (preds == labels).mean()
@dataclass
class a_ :
    """Which pretrained model/config/tokenizer to fine-tune from.

    The mangled source named every field `A__` (so only the last survived) and
    used `default=a`, an undefined name; the field names below are the ones
    the training entry point reads, with None defaults restored.
    """

    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class a_ :
    """Data-side arguments: task, data directory and tokenization limits.

    Field names restored to the ones the training entry point reads; the
    mangled `default=a` for overwrite_cache was a NameError, restored to False.
    """

    task_name: str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys() )} )
    data_dir: str = field(metadata={'help': 'Should contain the data files for the task.'} )
    max_seq_length: int = field(
        default=128 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def a_ ( ) -> Dict:
    """Run multiple-choice fine-tuning/evaluation via the HF Trainer.

    Builds arguments, model, tokenizer and datasets, optionally trains, then
    evaluates; returns the evaluation-metrics dict (empty without --do_eval).
    """
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    # The mangled source unpacked all three dataclasses into the same name and
    # then read `model_args`/`data_args`/`training_args`, which were unbound.
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            ''' --overwrite_output_dir to overcome.''' )

    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''',
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,  # was `fpaa` in the mangled source; TrainingArguments exposes `fp16`
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''', training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError('''Task not found: %s''' % (data_args.task_name))

    # Load pretrained model and tokenizer; from_pretrained guarantees only one
    # local process concurrently downloads model & vocab in distributed runs.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool('''.ckpt''' in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p) -> Dict:
        # p is an EvalPrediction; argmax over the choice dimension.
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator: multiple-of-8 padding helps tensor cores under fp16.
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''')

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, '''eval_results.txt''')
        if trainer.is_world_master():
            with open(output_eval_file, '''w''') as writer:
                logger.info('''***** Eval results *****''')
                for key, value in result.items():
                    logger.info(''' %s = %s''', key, value)
                    writer.write('''%s = %s\n''' % (key, value))

            results.update(result)

    return results
def a_ ( __magic_name__ ) -> List[Any]:
    """xla_spawn entry point; the process-index argument is unused.

    NOTE(review): `main` is not defined in this file (the training function
    above was mangled to `a_`), so this call raises NameError — confirm the
    intended target.
    """
    main()
# Standard script entry point.
if __name__ == "__main__":
    # NOTE(review): `main` is undefined here as well.
    main()
| 84 | 0 |
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class a_ ( a ):
    """Value-guided diffusion planner.

    Samples trajectories with `unet` and nudges them toward high reward using
    gradients of `value_function`, conditioning on the current environment
    state.  (The mangled source declared duplicate parameter names — a
    SyntaxError — and dropped every `self.` attribute binding; both restored.)
    """

    def __init__(self, value_function, unet, scheduler, env):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()

        # Per-key mean/std of the offline dataset, used to (de)normalize
        # observations and actions; non-numeric keys are skipped.
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        """Recursively move dicts/tensors/arrays onto the unet's device."""
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_xa(self, x_in, cond, act_dim):
        """Write the conditioning states back into the trajectory.

        The first `act_dim` channels of each step are actions; the remainder
        is state, which is overwritten at the conditioned timesteps.
        """
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in

    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        """Denoise `x` with value-function guidance; returns (x, last value y)."""
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]

                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad

                # no guidance on the final, nearly noise-free steps
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_xa(x, conditions, self.action_dim)

            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)

            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)['''prev_sample''']

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_xa(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y

    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        """Plan from observation `obs`; return the first action of the
        highest-value sampled trajectory as a denormalized numpy array."""
        # normalize the observation and create a batch dimension
        obs = self.normalize(obs, '''observations''')
        obs = obs[None].repeat(batch_size, axis=0)

        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_xa(x1, conditions, self.action_dim)
        x = self.to_torch(x)

        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)

        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key='''actions''')

        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)

        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
| 717 |
import re
def a_ ( __magic_name__ ) -> bool:
    """Return True when the argument is a valid Sri Lankan mobile phone number.

    Accepted prefixes: 0, 94, +94 or 0094, followed by 7, a second digit from
    {0,1,2,4,5,6,7,8}, an optional "-" or " " separator, and 7 more digits.
    """
    pattern = re.compile(
        R'''^(?:0|94|\+94|0{2}94)''' R'''7(0|1|2|4|5|6|7|8)''' R'''(-| |)''' R'''\d{7}$''' )
    # fullmatch instead of search: with search, `$` alone also accepts a
    # trailing newline ("0771234567\n" would wrongly pass).
    return pattern.fullmatch(__magic_name__) is not None


if __name__ == "__main__":
    # The original demo referenced undefined names (`phone`,
    # `is_sri_lankan_phone_number`); use the names this file defines.
    _a = '0094702343221'
    print(a_(_a))
| 84 | 0 |
import requests
_a : Tuple = 'https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey='
def a_ ( bbc_news_api_key ) -> None:
    """Fetch the current top BBC News stories and print each title, numbered from 1.

    `bbc_news_api_key` is a newsapi.org API key appended to `_NEWS_API`.  The
    mangled parameter name `__magic_name__` is restored to the name the body
    (and the demo call below) already used.
    """
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page['''articles'''], 1):
        print(F"{i}.) {article['title']}")


if __name__ == "__main__":
    # was `fetch_bbc_news(...)`, a NameError: the function here is named `a_`
    a_(bbc_news_api_key='<Your BBC News API key goes here>')
| 718 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class a_ ( unittest.TestCase ):
    """Holds the sizing knobs used to build LayoutLMv3 image-processor inputs.

    The mangled source declared every __init__ parameter with one name (a
    SyntaxError) and bound attributes to a throwaway local; both restored.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        """Constructor kwargs for the image processor under test."""
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class a_ ( a , unittest.TestCase ):
    """Image-processing tests for LayoutLMv3 (mixin-driven)."""

    # NOTE(review): the mangled source named this attribute `A__`, but the
    # tests below read `self.image_processing_class`, so that is the intended
    # name.  None when pytesseract is missing.
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def lowerCAmelCase( self : Dict ):
"""simple docstring"""
snake_case : Optional[Any] = LayoutLMvaImageProcessingTester(self )
@property
def lowerCAmelCase( self : Dict ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase( self : List[Any] ):
"""simple docstring"""
snake_case : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase__ , '''do_resize''' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , '''size''' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , '''apply_ocr''' ) )
def lowerCAmelCase( self : Optional[int] ):
"""simple docstring"""
snake_case : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
snake_case : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
    def lowerCAmelCase( self : str ):
        """Intentional no-op.

        NOTE(review): the original test name was lost in mangling — every
        method in this class is called `lowerCAmelCase`, so earlier
        definitions are shadowed; confirm which placeholder test this was.
        """
        pass
def lowerCAmelCase( self : Dict ):
"""simple docstring"""
# Initialize image_processing
snake_case : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , Image.Image )
# Test not batched input
snake_case : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
self.assertIsInstance(encoding.words , UpperCAmelCase__ )
self.assertIsInstance(encoding.boxes , UpperCAmelCase__ )
# Test batched
snake_case : Dict = image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def lowerCAmelCase( self : Union[str, Any] ):
"""simple docstring"""
# Initialize image_processing
snake_case : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , np.ndarray )
# Test not batched input
snake_case : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
snake_case : List[Any] = image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def lowerCAmelCase( self : Optional[Any] ):
"""simple docstring"""
# Initialize image_processing
snake_case : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )
# Test not batched input
snake_case : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
snake_case : Tuple = image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def lowerCAmelCase( self : Optional[Any] ):
"""simple docstring"""
# with apply_OCR = True
snake_case : int = LayoutLMvaImageProcessor()
from datasets import load_dataset
snake_case : List[Any] = load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' )
snake_case : List[Any] = Image.open(ds[0]['''file'''] ).convert('''RGB''' )
snake_case : Any = image_processing(UpperCAmelCase__ , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
snake_case : Optional[Any] = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', 
'''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
snake_case : Union[str, Any] = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 
447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , UpperCAmelCase__ )
self.assertListEqual(encoding.boxes , UpperCAmelCase__ )
# with apply_OCR = False
snake_case : str = LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase__ )
snake_case : Optional[Any] = image_processing(UpperCAmelCase__ , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 84 | 0 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_a : Optional[Any] = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
_a : str = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
_a : List[Any] = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. 
Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
    """`datasets.Metric` wrapper around the official MAUVE implementation (mauve-text).

    NOTE(review): the corrupted original named both methods ``lowerCAmelCase`` (the second
    shadowed the first) and gave ``_compute`` eighteen parameters all named
    ``UpperCAmelCase__`` — a SyntaxError. Restored to the ``_info``/``_compute`` hooks
    that ``datasets.Metric`` actually dispatches to, with the upstream parameter names.
    """

    def _info(self):
        """Describe the metric: citation, input features and reference URLs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage='''https://github.com/krishnap25/mauve''',
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''' , id='''sequence''' ),
                    '''references''': datasets.Value('''string''' , id='''sequence''' ),
                } ),
            codebase_urls=['''https://github.com/krishnap25/mauve'''],
            reference_urls=[
                '''https://arxiv.org/abs/2102.01454''',
                '''https://github.com/krishnap25/mauve''',
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1_024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        """Delegate to ``mauve.compute_mauve`` with predictions as P and references as Q.

        Returns the SimpleNamespace produced by ``compute_mauve`` (fields: mauve,
        frontier_integral, divergence_curve, p_hist, q_hist).
        """
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
| 719 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

# Usage example injected into the pipeline's __call__ docstring via `replace_example_docstring`.
# The corrupted original bound both values to `_a`, so the decorator's reference was undefined.
EXAMPLE_DOC_STRING = '\n    Examples:\n    ```py\n    >>> import torch\n    >>> import numpy as np\n\n    >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n    >>> from transformers import pipeline\n    >>> from diffusers.utils import load_image\n\n\n    >>> def make_hint(image, depth_estimator):\n    ...     image = depth_estimator(image)["depth"]\n    ...     image = np.array(image)\n    ...     image = image[:, :, None]\n    ...     image = np.concatenate([image, image, image], axis=2)\n    ...     detected_map = torch.from_numpy(image).float() / 255.0\n    ...     hint = detected_map.permute(2, 0, 1)\n    ...     return hint\n\n\n    >>> depth_estimator = pipeline("depth-estimation")\n\n    >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n    ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n    ... )\n    >>> pipe_prior = pipe_prior.to("cuda")\n\n    >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n    ...     "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n    ... )\n    >>> pipe = pipe.to("cuda")\n\n\n    >>> img = load_image(\n    ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n    ...     "/kandinsky/cat.png"\n    ... ).resize((768, 768))\n\n    >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n    >>> prompt = "A robot, 4k photo"\n    >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n    >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n    >>> image_emb, zero_image_emb = pipe_prior(\n    ...     prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n    ... ).to_tuple()\n\n    >>> images = pipe(\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=zero_image_emb,\n    ...     hint=hint,\n    ...     num_inference_steps=50,\n    ...     generator=generator,\n    ...     height=768,\n    ...     width=768,\n    ... ).images\n\n    >>> images[0].save("robot_cat.png")\n    ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    """Round a (height, width) pair down to the latent grid and back up to pixel space.

    Each dimension is divided by ``scale_factor**2`` (rounded up) and then multiplied by
    ``scale_factor``, yielding sizes compatible with the VQ latent resolution.

    NOTE(review): the corrupted original assigned the quotients to throwaway names and then
    incremented undefined ``new_height``/``new_width`` (NameError); it also declared three
    parameters with the same name. The pipeline below calls this function by this name.
    """
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


# Backward-compatibility alias for the corrupted original name.
a_ = downscale_height_and_width
class a_ ( DiffusionPipeline ):
    """Text-free Kandinsky 2.2 controlnet decoder pipeline (image embeddings + hint -> image).

    NOTE(review): reconstructed from a corrupted source. The original subclassed the
    undefined name ``a`` (DiffusionPipeline is imported above), named every method
    ``lowerCAmelCase`` while internal call sites used ``self.prepare_latents`` /
    ``self._execution_device`` / ``self.movq_scale_factor``, and declared ``__call__``
    with a dozen parameters all named ``UpperCAmelCase__`` (a SyntaxError).

    Args:
        unet: conditional U-Net denoiser.
        scheduler: DDPM scheduler used for the denoising loop.
        movq: VQ image decoder.
    """

    def __init__(self, unet: UNetaDConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        # One halving per down block of the VQ model; used to map pixel sizes to latent sizes.
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        """Draw initial noise latents (or validate user-provided ones) and scale by the
        scheduler's initial sigma."""
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        """Offload all sub-models to CPU, moving each to GPU only while it executes."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('''Please install accelerate via `pip install accelerate`''')

        device = torch.device(F"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        """Offload whole sub-models to CPU with hooks, keeping one model on GPU at a time."""
        if is_accelerate_available() and is_accelerate_version('''>=''', '''0.17.0.dev0'''):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''')

        device = torch.device(F"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to('''cpu''', silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        """Device the U-Net actually runs on (accounts for accelerate offload hooks)."""
        if not hasattr(self.unet, '''_hf_hook'''):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, '''_hf_hook''')
                and hasattr(module._hf_hook, '''execution_device''')
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """Run the denoising loop and decode latents with the VQ model.

        Returns an ImagePipelineOutput (or a plain tuple when return_dict=False).
        """
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            # Unconditional batch first, then conditional — standard CFG layout.
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {'''image_embeds''': image_embeds, '''hint''': hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                # The U-Net also predicts a variance; split it off before guidance.
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, '''variance_type''')
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)['''sample''']

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 84 | 0 |
def perfect_cube(n: int) -> bool:
    """Return True if ``n`` is a perfect cube (including negative cubes and 0).

    The corrupted original computed ``n ** (1/3)`` as a float and compared the re-cubed
    value with ``==``; floating-point error made even ``perfect_cube(27)`` return False.
    Here we round the real cube root to the nearest integer and verify exactly in
    integer arithmetic, probing the neighbours to absorb any float rounding error.
    It was also defined under a different name than the one called below (NameError).
    """
    magnitude = abs(n)
    guess = round(magnitude ** (1 / 3))
    # Float cube roots of large ints can round to an adjacent integer; check neighbours.
    return any(max(guess + d, 0) ** 3 == magnitude for d in (-1, 0, 1))


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
| 720 |
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_a : List[Any] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class a_ ( TokenizerTesterMixin , unittest.TestCase ):
    """Tokenization tests for ReformerTokenizer / ReformerTokenizerFast.

    NOTE(review): reconstructed from a corrupted source in which the base class was the
    undefined name ``a`` (TokenizerTesterMixin is imported above), all class attributes
    were named ``A__`` (shadowing each other while method bodies read
    ``self.rust_tokenizer_class`` etc.), every method was named ``lowerCAmelCase`` and
    every local was named ``snake_case`` (destroying intermediate values).
    """

    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # `_a` is the module-level path to the SentencePiece test fixture.
        tokenizer = ReformerTokenizer(_a, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Round-trip a token through `_convert_token_to_id` / `_convert_id_to_token`."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1_000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_000)

    def test_rust_and_python_full_tokenizers(self):
        """Slow and fast tokenizers must agree on tokens and ids."""
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_padding(self, max_length=15):
        """The tokenizer has no padding token, so padding requests must raise ValueError."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass

    def test_full_tokenizer(self):
        tokenizer = ReformerTokenizer(_a, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [
            108, 265, 24, 111, 4, 258, 156, 35, 28, 275, 3, 259, 297, 260, 84, 4, 35, 110, 44, 8,
            259, 91, 268, 21, 11, 209, 274, 109, 266, 277, 117, 86, 93, 315, 258, 278, 258, 277, 258, 0,
            258, 288, 258, 319, 258, 0, 258, 0, 258, 0, 258, 0, 258, 287, 258, 315, 258, 289, 258, 278,
            99, 269, 266, 262, 8, 259, 241, 4, 217, 230, 268, 266, 55, 168, 106, 75, 193, 266, 223, 27,
            49, 26, 282, 25, 264, 299, 19, 26, 0, 258, 277, 117, 86, 93, 176, 183, 270, 11, 262, 42,
            61, 265,
        ]
        # fmt: on

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import ReformerConfig, ReformerModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt")
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt")

        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["input_ids"].shape
        model = ReformerModel(config)

        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # This tokenizer does not know some characters like ")".
        # That is the reason why we use very simple texts here.
        # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
            "This is a very simple sentence.",
            "The quick brown fox jumps over the lazy dog.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/reformer-crime-and-punishment",
            revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a",
            padding=False,
            sequences=sequences,
        )
| 84 | 0 |
def is_palindrome(head):
    """Return True if the singly linked list starting at *head* reads the same both ways.

    O(n) time, O(1) extra space: find the middle with fast/slow pointers, reverse
    the second half in place, then compare it with the first half node by node.
    NOTE: the list is mutated (it is cut in half and the tail stays reversed).
    Renamed from the colliding placeholder name `a_` so all three variants coexist.
    """
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # cut the first half so the comparison loop terminates
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts; the second part has the same number of nodes or one less
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def is_palindrome_stack(head):
    """Palindrome check using O(n) extra space: push the second half onto a stack.

    Popping the stack yields the second half reversed, which must match the first
    half read forward. The list itself is not modified.
    """
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def is_palindrome_dict(head):
    """Palindrome check by recording the position of every value occurrence.

    For a palindrome, each value's positions must be symmetric about the middle
    (every mirrored pair of positions sums to len - 1), and at most one value may
    occur an odd number of times (the middle element of an odd-length list).
    """
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1  # mirrored positions must sum to this
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
| 721 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    """Convert a PIL image into a float32 tensor in [-1, 1] of shape (1, C, H, W).

    Width/height are rounded down to a multiple of 32 as required by the UNet.
    Named `preprocess` because the pipeline class below calls it by that name.
    """
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    # np.floataa was a corrupted spelling of np.float32
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)  # HWC -> NCHW with batch dim
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class a_(DiffusionPipeline):
    """Latent-diffusion super-resolution pipeline.

    Components: `vqvae` decodes latents back to pixel space, `unet` predicts the
    noise residual, `scheduler` drives the denoising loop.
    Base class fixed to `DiffusionPipeline` (imported above); the previous base
    name `a` was undefined.
    """

    def __init__(
        self,
        vqvae: VQModel,
        unet: UNetaDModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """Run the denoising loop on the low-resolution `image` and return the result.

        Raises:
            ValueError: if `image` is neither a PIL image nor a torch tensor.
        """
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype
        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 84 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Checkpoint name -> hosted config URL. The previous `_a` placeholder clobbered
# the logger assignment with this dict.
VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class a_(PretrainedConfig):
    """Configuration for a ViT-MAE model (masked-autoencoder Vision Transformer).

    Stores encoder hyper-parameters plus the decoder sizes used during MAE
    pre-training. Defaults follow facebook/vit-mae-base.
    Base class fixed to `PretrainedConfig` (imported above); `model_type` restored —
    the serialization/AutoConfig machinery reads that attribute name.
    """

    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
| 700 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class a_(TestCase):
    """Tests for ``RealmRetriever``: tokenization, block retrieval, and (de)serialization.

    Base class fixed to ``TestCase`` (imported above); distinct test-method names
    restored — the previous duplicated names shadowed each other so only the last
    method survived.
    """

    def setUp(self):
        # Scratch dir holding a dummy WordPiece vocab and the block-records store.
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
            "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "test", "question", "this", "is", "the",
            "first", "second", "third", "fourth", "fifth", "record", "want", "##want", "##ed", "wa",
            "un", "runn", "##ing", ",", "low", "lowest",
        ]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)

    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            }
        )
        return dataset

    def get_dummy_block_records(self):
        # dtype=object keeps the raw bytes blocks intact inside the numpy array.
        block_records = np.array(
            [
                b"This is the first record",
                b"This is the second record",
                b"This is the third record",
                b"This is the fourth record",
                b"This is the fifth record",
                b"This is a longer longer longer record",
            ],
            dtype=object,
        )
        return block_records

    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever

    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"],
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"],
        )

    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, _ = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        # Block 0 has no answer span; blocks 3 and 5 do.
        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)

    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")

        self.assertEqual(retriever.block_records[0], b"This is the first record")
| 84 | 0 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# `logger` is referenced by this file's Conditional DETR config __init__.
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}
class a_(PretrainedConfig):
    """Configuration for the Conditional DETR object-detection model.

    Defaults reproduce microsoft/conditional-detr-resnet-50. Base class fixed to
    `PretrainedConfig`; `model_type`/`keys_to_ignore_at_inference`/`attribute_map`
    restored — the HF config machinery reads those attribute names.
    """

    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        """Raises ValueError when both `backbone_config` and `use_timm_backbone` are given."""
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class a_(OnnxConfig):
    """ONNX export configuration for Conditional DETR.

    Base class fixed to `OnnxConfig` (imported above); the property names below
    (`inputs`, `atol_for_validation`, `default_onnx_opset`) are what the ONNX
    export machinery reads.
    """

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes for the two model inputs.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating the exported model's outputs.
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 701 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy-import structure: submodule name -> public names it provides.
# The placeholder `_a` never defined `_import_structure`, which is what
# `_LazyModule` below consumes, and the proxy was never installed in sys.modules.
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_encoder_decoder import EncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encoder_decoder import EncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_encoder_decoder import TFEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 84 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq parameter path -> HF parameter path; "*" is replaced by the layer index.
# Restored name `MAPPING`: the loading code below iterates `MAPPING.items()`.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
    "self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
    "self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
    "self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
    "self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
    "self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
    "self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
    "self_attn.rotary_emb": "encoder.embed_positions",
    "self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
    "conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
    "conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
    "conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
    "conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
    "conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
    "ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
    "ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
    "ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
    "ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
    "ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
    "ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
# Keys that live at the top level of the HF model (no encoder-prefix added).
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Assign fairseq tensor `value` to the HF parameter addressed by dotted `key`.

    `weight_type` selects which attribute of the resolved module to overwrite
    (weight/bias/norm statistics/...); None means the pointer itself is the tensor.
    Raises ValueError on any shape mismatch. Renamed from the placeholder `a_`:
    the loading loop in this file calls `set_recursively`.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    """Copy every fairseq weight into `hf_model` via MAPPING; log what is left over.

    Conv feature-extractor weights are delegated to `load_conv_layer`; everything
    else is matched against MAPPING and written with `set_recursively`.
    NOTE(review): `is_headless` is accepted for signature compatibility with the
    caller but unused here — TODO confirm against the upstream conversion script.
    Renamed from the placeholder `a_`: the convert function calls this name.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    # The "wav2vec2_conformer." prefix below matches the literal used in MAPPING handling.
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
            if not is_used:
                unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq conv-feature-extractor tensor into the HF feature extractor.

    `full_name` encodes "<layer_id>.<type_id>....": type 0 is the conv weight/bias,
    type 2 is the layer norm (or group norm on layer 0). Raises ValueError on shape
    mismatch; anything unrecognized is appended to `unused_weights`.
    Renamed from the placeholder `a_`: the loading loop calls this name.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavaveca_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Convert a fairseq Conformer checkpoint to the HF format and save it.

    Builds the config (and, for fine-tuned CTC models, the tokenizer/processor
    from the fairseq dictionary), loads the fairseq model, copies the weights,
    and writes everything to `pytorch_dump_folder_path`.
    Named to match the call in this file's __main__ guard.
    """
    if config_path is not None:
        config = WavaVecaConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = WavaVecaConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16_000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wavavec = WavaVecaConformerForCTC(config)
    else:
        hf_wavavec = WavaVecaConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wavavec, not is_finetuned)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the conversion.
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_wavaveca_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 702 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

# Module-level tokenizer metadata. The canonical names are restored because the
# tokenizer class below references them as class attributes.
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

# Checkpoint-name -> remote vocabulary/tokenizer file locations.
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'yjernite/retribert-base-uncased': (
            'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
        ),
    },
    'tokenizer_file': {
        'yjernite/retribert-base-uncased': (
            'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
        ),
    },
}

# Maximum sequence length supported by each pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'yjernite/retribert-base-uncased': 512,
}

# Per-checkpoint tokenizer-construction defaults.
PRETRAINED_INIT_CONFIGURATION = {
    'yjernite/retribert-base-uncased': {'do_lower_case': True},
}
class a_ ( a ):
    """Fast RetriBERT tokenizer (backed by the HuggingFace *tokenizers* library).

    Identical to a fast BERT tokenizer: WordPiece with ``[CLS]``/``[SEP]``
    special-token framing. The original obfuscated version declared every
    parameter with the same name (a SyntaxError) and lost the method names the
    base class dispatches on; both are restored here.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Re-configure the backend normalizer if the saved tokenizer.json
        # disagrees with the options requested here.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Frame one or two sequences as ``[CLS] A [SEP]`` / ``[CLS] A [SEP] B [SEP]``."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Segment ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Persist the WordPiece vocabulary; returns the written file paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 84 | 0 |
import torch
from torch import nn
class a_ ( nn.Module ):
    """Projected adaptive log-softmax head (Transformer-XL style).

    The vocabulary is split at ``cutoffs`` into a frequent-token "head"
    shortlist plus one cluster token per tail partition; tail tokens are scored
    by a second per-cluster softmax. With ``div_val > 1`` each successive tail
    uses a smaller embedding size projected back to ``d_proj``.

    The obfuscated original had collapsed every tuple-unpacking target and
    renamed all three methods identically; the bindings (``l_idx, r_idx``,
    ``weights, biases``, ...) and the method names the bodies actually call
    (``_compute_logit``) are restored, with the computation kept verbatim.
    """

    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            # One extra "cluster token" per tail partition, scored by the head softmax.
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    # No projection needed when model and embedding widths agree.
                    self.out_projs.append(None)

                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)

                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        """Linear logits for one partition, optionally through a projection."""
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit

    def forward(self, hidden, labels=None, keep_order=False):
        """Return per-position NLL when ``labels`` is given (positions with
        label ``-100`` are ignored), otherwise log-probabilities over the full
        vocabulary. When labels are provided, inputs are shifted so tokens
        ``< n`` predict token ``n``.
        """
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError('Input and labels should have the same size in the batch dimension.')
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            # Degenerate case: plain (projected) softmax over the whole vocab.
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases for the head plus each tail partition
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    # Restrict this pass to the positions whose target falls in [l_idx, r_idx).
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, 'keep_order') and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out

    def log_prob(self, hidden):
        """Log-probabilities over the full vocabulary for flattened ``hidden``."""
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases (same assembly as in forward)
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    # NOTE(review): kept verbatim from the source; `[:, -i]` has shape
                    # (N,) against (N, tail) — confirm broadcasting intent upstream.
                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
| 703 |
import string
import numpy
def a_(a: int, b: int) -> int:
    """Return gcd(a, b) via Euclid's algorithm (tail-recursive form)."""
    return b if a == 0 else a_(b % a, a)


# Descriptive alias: code further down in this module calls the helper by this
# name, and ``a_`` itself is shadowed by a later top-level definition.
greatest_common_divisor = a_
class a_:
    """Hill cipher over the 36-symbol alphabet A-Z0-9.

    The key is an n x n integer matrix (mod 36); plaintext is chunked into
    length-n vectors, multiplied by the key (or its modular inverse for
    decryption) and mapped back to symbols.
    """

    # Alphabet: 26 uppercase letters followed by the 10 digits.
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)
    # Round every entry of a float matrix to the nearest integer.
    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray):
        """Store the key mod 36 and validate its determinant.

        Raises:
            ValueError: if the key determinant is not coprime with 36.
        """
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        """Map a symbol to its index in the 36-symbol alphabet."""
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        """Map a (possibly float) value back to its alphabet symbol."""
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        """Raise ValueError unless det(key) is coprime with 36 (invertible mod 36)."""
        from math import gcd  # stdlib; the module-level helper name was mangled

        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)

        req_l = len(self.key_string)
        if gcd(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        """Uppercase, drop non-alphabet chars, pad with the last char to a
        multiple of the key order. (Empty input raises IndexError, as before.)"""
        chars = [char for char in text.upper() if char in self.key_string]

        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)

        return "".join(chars)

    def encrypt(self, text: str) -> str:
        """Encrypt ``text`` block-by-block with the key matrix."""
        text = self.process_text(text.upper())
        encrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch

        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        """Build the inverse key mod 36 via det(K) * K^-1 (the adjugate) scaled
        by the modular inverse of det(K)."""
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break

        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )

        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        """Decrypt ``text`` block-by-block with the modular inverse key."""
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch

        return decrypted


# Descriptive alias: the interactive driver below refers to the class by this
# name, and ``a_`` itself is shadowed by a later top-level definition.
HillCipher = a_
def a_() -> None:
    """Interactive driver: read an n x n key matrix, then encrypt or decrypt
    user-supplied text with the Hill cipher."""
    n = int(input('Enter the order of the encryption key: '))
    hill_matrix = []

    print('Enter each row of the encryption key with space separated integers')
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print('Would you like to encrypt or decrypt some text? (1 or 2)')
    option = input('\n1. Encrypt\n2. Decrypt\n')
    if option == "1":
        text_e = input('What text would you like to encrypt?: ')
        print('Your encrypted text is:')
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input('What text would you like to decrypt?: ')
        print('Your decrypted text is:')
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    a_()
| 84 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Lazy-import structure: submodule name -> public symbols it provides.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # sentencepiece is optional; simply expose nothing when it is missing.
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]


if TYPE_CHECKING:
    # Static type checkers see the real imports.
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer
else:
    # At runtime, replace this module with a lazy proxy that imports
    # submodules on first attribute access.
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 704 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class a_ ( a ):
    """PipelineTool that captions an image with a BLIP checkpoint.

    Restores the attribute and hook-method names the PipelineTool base class
    dispatches on (encode/forward/decode), and passes ``skip_special_tokens=True``
    when decoding (the obfuscated version passed the outputs object there).
    """

    default_checkpoint = 'Salesforce/blip-image-captioning-base'
    description = (
        'This is a tool that generates a description of an image. It takes an input named `image` which should be the '
        'image to caption, and returns a text that contains the description in English.'
    )
    name = 'image_captioner'
    model_class = AutoModelForVisionaSeq

    inputs = ['image']
    outputs = ['text']

    def __init__(self, *args, **kwargs):
        # The pre-processor needs PIL/vision support.
        requires_backends(self, ['vision'])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        """Turn the PIL image into model-ready tensors."""
        return self.pre_processor(images=image, return_tensors='pt')

    def forward(self, inputs):
        """Generate caption token ids."""
        return self.model.generate(**inputs)

    def decode(self, outputs):
        """Decode the generated ids into a clean caption string."""
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
| 84 | 0 |
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Module logger — referenced throughout main() below.
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')

require_version('datasets>=1.14.0', 'To fix: pip install -r examples/pytorch/audio-classification/requirements.txt')
def a_(wav, max_length, sample_rate=16_000):
    """Randomly sample chunks of ``max_length`` seconds from the input audio.

    If the clip is already no longer than the requested window it is returned
    unchanged; otherwise a uniformly random contiguous window is cut out.
    """
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]


# Descriptive alias used by the training transforms in main().
random_subsample = a_
@dataclass
class a_:
    """Arguments pertaining to what data we are going to input our model for
    training and eval. Field names are restored: the obfuscated version named
    every field identically, so only one survived."""

    dataset_name: Optional[str] = field(default=None, metadata={'help': 'Name of a dataset from the datasets package'})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}
    )
    train_file: Optional[str] = field(
        default=None, metadata={'help': 'A file containing the training audio paths and labels.'}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={'help': 'A file containing the validation audio paths and labels.'}
    )
    train_split_name: str = field(
        default='train',
        metadata={
            'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
        },
    )
    eval_split_name: str = field(
        default='validation',
        metadata={
            'help': (
                'The name of the training data set split to use (via the datasets library). Defaults to \'validation\''
            )
        },
    )
    audio_column_name: str = field(
        default='audio',
        metadata={'help': 'The name of the dataset column containing the audio data. Defaults to \'audio\''},
    )
    label_column_name: str = field(
        default='label', metadata={'help': 'The name of the dataset column containing the labels. Defaults to \'label\''}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={'help': 'Audio clips will be randomly cut to this length during training if the value is set.'},
    )


# Descriptive alias: main() builds the HfArgumentParser with this name.
DataTrainingArguments = a_
@dataclass
class a_:
    """Arguments pertaining to which model/config/tokenizer we are going to
    fine-tune from. ``__post_init__`` is restored (the obfuscated rename meant
    the deprecation check never ran)."""

    model_name_or_path: str = field(
        default='facebook/wav2vec2-base',
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'},
    )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'}
    )
    model_revision: str = field(
        default='main',
        metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={'help': 'Name or path of preprocessor config.'}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={'help': 'Whether to freeze the feature encoder layers of the model.'}
    )
    attention_mask: bool = field(
        default=True, metadata={'help': 'Whether to generate an attention mask in the feature extractor.'}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={'help': 'Whether to freeze the feature extractor layers of the model.'}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'},
    )

    def __post_init__(self):
        # `--freeze_feature_extractor` is a deprecated spelling of
        # `--freeze_feature_encoder`; warn or reject contradictory settings.
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                'The argument `--freeze_feature_extractor` is deprecated and '
                'will be removed in a future version. Use `--freeze_feature_encoder`'
                'instead. Setting `freeze_feature_encoder==True`.',
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                'The argument `--freeze_feature_extractor` is deprecated and '
                'should not be used in combination with `--freeze_feature_encoder`.'
                'Only make use of `--freeze_feature_encoder`.'
            )


# Descriptive alias: main() builds the HfArgumentParser with this name.
ModelArguments = a_
def a_() -> None:
    """Fine-tune a pretrained audio model for audio classification.

    Mirrors the standard transformers audio-classification example: parse
    arguments, set up logging/seed, load and transform the dataset, build the
    model, train/evaluate with Trainer, then write the model card or push to
    the Hub.
    """
    # Parse command-line arguments (or a single JSON config file) into the
    # three argument dataclasses.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_audio_classification', model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fpaa}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                'Use --overwrite_output_dir to train from scratch.'
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.'
            )

    # Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    if data_args.audio_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
            'Make sure to set `--audio_column_name` to the correct audio column - one of '
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    if data_args.label_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
            'Make sure to set `--label_column_name` to the correct text column - one of '
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]

    def train_transforms(batch):
        """Random-subsample each clip, then featurize the batch (training)."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio['array'], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    def val_transforms(batch):
        """Featurize full clips without subsampling (evaluation)."""
        wavs = [audio['array'] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets['train'].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load('accuracy')

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        labelaid=label2id,
        idalabel=id2label,
        finetuning_task='audio-classification',
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool('.ckpt' in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets['train'].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets['eval'].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets['train'] if training_args.do_train else None,
        eval_dataset=raw_datasets['eval'] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics('train', train_result.metrics)
        trainer.save_metrics('train', train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        'finetuned_from': model_args.model_name_or_path,
        'tasks': 'audio-classification',
        'dataset': data_args.dataset_name,
        'tags': ['audio-classification'],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


# Descriptive alias kept because the example script exposes ``main``.
main = a_

if __name__ == "__main__":
    main()
| 705 |
def a_ ( __magic_name__ ) -> bool:
    """Lucas-Lehmer primality test for the Mersenne number 2**p - 1.

    Args:
        __magic_name__: the exponent ``p``; must be at least 2.

    Returns:
        True if ``2**p - 1`` is a (Mersenne) prime, False otherwise.

    Raises:
        ValueError: if ``p`` is less than 2.
    """
    # The original body referenced an undefined name `p`; bind it explicitly.
    p = __magic_name__
    if p < 2:
        raise ValueError('''p should not be less than 2!''' )
    elif p == 2:
        return True  # 2**2 - 1 == 3 is prime; the sequence below needs p > 2
    # Lucas-Lehmer sequence: s0 = 4, s_{k+1} = (s_k**2 - 2) mod (2**p - 1).
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    # 2**p - 1 is prime iff the sequence hits 0 after exactly p - 2 steps.
    return s == 0
if __name__ == "__main__":
    # The Lucas-Lehmer test above is defined as `a_`; the original called the
    # undefined name `lucas_lehmer_test`.
    print(a_(7))
    print(a_(11))
| 84 | 0 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_a : Optional[Any] = logging.get_logger()
def a_ ( hidden_sizes , name , config , save_directory , push_to_hub = True ):
    """Convert one timm LeViT checkpoint into a HF Levit model and save it.

    The original signature declared five parameters all named
    ``__magic_name__`` (a SyntaxError); the names here are reconstructed from
    the call site further down in this file.

    Args:
        hidden_sizes: first stage width (128/192/256/384), selects the timm variant.
        name: checkpoint name, e.g. ``"levit-128S"``.
        config: ``LevitConfig`` for the target HF model.
        save_directory: ``pathlib.Path`` the converted checkpoint is written under.
        push_to_hub: when True, save model + image processor under
            ``save_directory / name``.
    """
    print(f"Converting {name}..." )
    with torch.no_grad():
        # Pick the matching pretrained timm architecture.
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model('''levit_128s''' , pretrained=True )
            else:
                from_model = timm.create_model('''levit_128''' , pretrained=True )
        if hidden_sizes == 192:
            from_model = timm.create_model('''levit_192''' , pretrained=True )
        if hidden_sizes == 256:
            from_model = timm.create_model('''levit_256''' , pretrained=True )
        if hidden_sizes == 384:
            from_model = timm.create_model('''levit_384''' , pretrained=True )
        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config ).eval()
        # Copy weights positionally: both state dicts enumerate parameters in
        # the same order, only the key names differ.
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys() )
        new_keys = list(our_model.state_dict().keys() )
        print(len(og_keys ) , len(new_keys ) )
        for i in range(len(og_keys ) ):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights )
        # Sanity check: both models must produce identical logits.
        x = torch.randn((2, 3, 224, 224) )
        from_logits = from_model(x )
        our_logits = our_model(x ).logits
        assert torch.allclose(from_logits , our_logits ), "The model logits don't match the original one."
        checkpoint_name = name
        print(checkpoint_name )
    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name )
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name )
        print(f"Pushed {checkpoint_name}" )


# Readable alias so later code in this file can reach this converter even
# after the name `a_` is rebound by the next definition.
convert_weight_and_push = a_
def a_ ( save_directory , model_name = None , push_to_hub = True ):
    """Convert one or all LeViT checkpoints; return ``(config, expected_shape)``.

    The original signature declared three parameters all named
    ``__magic_name__`` (a SyntaxError); names are reconstructed from the
    ``__main__`` call site below.

    Args:
        save_directory: ``pathlib.Path`` converted checkpoints are written under.
        model_name: a specific variant (e.g. ``"levit-128S"``); None converts all.
        push_to_hub: forwarded to the per-checkpoint converter.
    """
    filename = '''imagenet-1k-id2label.json'''
    num_labels = 1_000
    expected_shape = (1, num_labels)
    repo_id = '''huggingface/label-files'''
    # Build the ImageNet label maps shared by every Levit config.
    with open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) as label_file:
        idalabel = json.load(label_file )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}
    ImageNetPreTrainedConfig = partial(LevitConfig , num_labels=num_labels , id2label=idalabel , label2id=labelaid )
    names_to_hidden_sizes = {
        '''levit-128S''': 128,
        '''levit-128''': 128,
        '''levit-192''': 192,
        '''levit-256''': 256,
        '''levit-384''': 384,
    }
    names_to_config = {
        '''levit-128S''': ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
        '''levit-128''': ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
        '''levit-192''': ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
        '''levit-256''': ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
        '''levit-384''': ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
    }
    if model_name:
        # Bind `config` here too so the return below never hits an unbound local.
        config = names_to_config[model_name]
        convert_weight_and_push(
            names_to_hidden_sizes[model_name] , model_name , config , save_directory , push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name] , model_name , config , save_directory , push_to_hub )
    return config, expected_shape


# Readable alias matching the call in the __main__ block below.
convert_weights_and_push = a_
if __name__ == "__main__":
    # The original bound the parser to `_a` and then used the undefined names
    # `parser`, `args` and `pytorch_dump_folder_path`; bind them consistently.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default=None,
        type=str,
        help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='levit-dump-folder/',
        type=Path,
        required=False,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
    parser.add_argument(
        '--no-push_to_hub',
        dest='push_to_hub',
        action='store_false',
        help='Do not push model and image processor to the hub',
    )
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    # `a_` is the convert-all entry point defined directly above.
    a_(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 706 |
from sklearn.metrics import fa_score
import datasets
_a : List[str] = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'
_a : Dict = '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. 
This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, 
references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n'
_a : List[Any] = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
# NOTE(review): `_DESCRIPTION`, `_KWARGS_DESCRIPTION` and `_CITATION` are
# referenced here, but the module-level string constants above are all
# assigned to `_a` — looks like a renaming slip; confirm against upstream.
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
    def lowerCAmelCase( self : Any ):
        """Describe the F1 metric: int32 sequences for the multilabel config,
        scalar int32 predictions/references otherwise."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
                    '''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
                }
                if self.config_name == '''multilabel'''
                else {
                    '''predictions''': datasets.Value('''int32''' ),
                    '''references''': datasets.Value('''int32''' ),
                } ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
    # NOTE(review): this signature repeats the parameter name `UpperCAmelCase__`
    # (a SyntaxError), the result is bound to `snake_case` but read as `score`,
    # and `fa_score` is presumably sklearn's `f1_score` — confirm upstream.
    def lowerCAmelCase( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Optional[Any]=1 , UpperCAmelCase__ : List[str]="binary" , UpperCAmelCase__ : str=None ):
        """Compute F1 via scikit-learn; returns a scalar for size-1 results,
        otherwise the per-label array."""
        snake_case : List[Any] = fa_score(
            UpperCAmelCase__ , UpperCAmelCase__ , labels=UpperCAmelCase__ , pos_label=UpperCAmelCase__ , average=UpperCAmelCase__ , sample_weight=UpperCAmelCase__ )
        return {"f1": float(UpperCAmelCase__ ) if score.size == 1 else score}
| 84 | 0 |
import re
def a_ ( __magic_name__ ) -> bool:
    """Validate a Sri Lankan mobile phone number.

    Accepted prefixes are ``0``, ``94``, ``+94`` or ``0094``, followed by a
    network code ``7x`` with x in {0,1,2,4,5,6,7,8}, an optional ``-`` or
    space separator, and exactly seven digits.
    """
    phone = __magic_name__
    pattern = re.compile(
        R'''^(?:0|94|\+94|0{2}94)''' R'''7(0|1|2|4|5|6|7|8)''' R'''(-| |)''' R'''\d{7}$''' )
    # Match the candidate against the pattern. The original called
    # re.search(phone, phone) — input used as its own pattern — and left the
    # compiled pattern unused.
    return bool(pattern.search(phone ) )
if __name__ == "__main__":
    _a : Any = '0094702343221'
    # The validator above is defined as `a_`; the original called the
    # undefined names `is_sri_lankan_phone_number` and `phone`.
    print(a_(_a))
| 707 |
def a_ ( __magic_name__ ) -> int:
    """Return the largest number obtainable by deleting exactly one digit
    from the absolute value of the given integer.

    Requires an input with at least two digits (deleting the only digit of a
    single-digit number leaves an empty string, which ``int`` rejects).

    Raises:
        TypeError: if the input is not an ``int``.
    """
    if not isinstance(__magic_name__ , int ):
        raise TypeError('''only integers accepted as input''' )
    num_string = str(abs(__magic_name__ ) )
    # One copy of the digit list per position; drop a different digit from each.
    num_transpositions = [list(num_string ) for _ in range(len(num_string ) )]
    for index in range(len(num_string ) ):
        num_transpositions[index].pop(index )
    return max(
        int(''''''.join(transposition ) ) for transposition in num_transpositions )
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    __import__('doctest').testmod()
| 84 | 0 |
def a_ ( number , digit_amount ) -> float:
    """Isolate the fractional part of ``number``.

    The original signature declared both parameters as ``__magic_name__``
    (a SyntaxError) while the body read ``number`` and ``digit_amount``.

    Args:
        number: value whose fractional part is wanted.
        digit_amount: when > 0, round the fractional part to this many digits.

    Returns:
        The fractional part, sign-preserving (truncation toward zero).
    """
    fractional = number - int(number )
    if digit_amount > 0:
        return round(fractional , digit_amount )
    return fractional
if __name__ == "__main__":
    # Demonstrate the decimal-isolation helper defined above as `a_`;
    # the original called the undefined name `decimal_isolate`.
    print(a_(1.53, 0))
    print(a_(35.3_45, 1))
    print(a_(35.3_45, 2))
    print(a_(35.3_45, 3))
    print(a_(-14.7_89, 3))
    print(a_(0, 2))
    print(a_(-14.1_23, 1))
    print(a_(-14.1_23, 2))
    print(a_(-14.1_23, 3))
| 708 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class a_ :
    """Builder of tiny UMT5 configs and random inputs for the tests below.

    NOTE(review): this block appears mechanically renamed from an upstream
    test file — every method is named ``lowerCAmelCase`` (each ``def``
    shadows the previous one) and several signatures repeat the parameter
    name ``UpperCAmelCase__``, which Python rejects as a SyntaxError. The
    code is kept byte-identical here; reconcile with the upstream source.
    """

    # NOTE(review): duplicate ``UpperCAmelCase__`` parameter names — SyntaxError.
    # The defaults describe a tiny test model (vocab 99, batch 13, encoder
    # length 7, decoder length 9, hidden 32, 5 layers, 4 heads, ...).
    def __init__( self : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any]=99 , UpperCAmelCase__ : Dict=13 , UpperCAmelCase__ : int=7 , UpperCAmelCase__ : Any=9 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Tuple=False , UpperCAmelCase__ : Tuple=32 , UpperCAmelCase__ : Dict=5 , UpperCAmelCase__ : Optional[int]=4 , UpperCAmelCase__ : List[Any]=37 , UpperCAmelCase__ : Union[str, Any]=8 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : str=0.002 , UpperCAmelCase__ : str=1 , UpperCAmelCase__ : Any=0 , UpperCAmelCase__ : Union[str, Any]=0 , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Optional[Any]=None , ):
        """Store the test hyperparameters on the instance."""
        snake_case : Union[str, Any] = parent
        snake_case : Union[str, Any] = batch_size
        snake_case : Any = encoder_seq_length
        snake_case : str = decoder_seq_length
        # For common tests
        snake_case : Optional[int] = self.decoder_seq_length
        snake_case : Optional[Any] = is_training
        snake_case : List[Any] = use_attention_mask
        snake_case : Union[str, Any] = use_labels
        snake_case : Any = vocab_size
        snake_case : Optional[int] = hidden_size
        snake_case : List[str] = num_hidden_layers
        snake_case : Union[str, Any] = num_attention_heads
        snake_case : Any = d_ff
        snake_case : Any = relative_attention_num_buckets
        snake_case : Optional[Any] = dropout_rate
        snake_case : int = initializer_factor
        snake_case : Optional[Any] = eos_token_id
        snake_case : Dict = pad_token_id
        snake_case : Optional[Any] = decoder_start_token_id
        snake_case : Union[str, Any] = None
        snake_case : List[str] = decoder_layers
    # Fetch the full pretrained config (not the tiny test config).
    def lowerCAmelCase( self : Union[str, Any] ):
        """Return the pretrained google/umt5-base configuration."""
        return TaConfig.from_pretrained('''google/umt5-base''' )
    # NOTE(review): duplicate ``UpperCAmelCase__`` parameter names — SyntaxError.
    def lowerCAmelCase( self : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : List[Any]=None , ):
        """Build the model-call kwargs, defaulting any missing masks
        (pad-token masks for inputs, all-ones for head masks)."""
        if attention_mask is None:
            snake_case : Union[str, Any] = input_ids.ne(config.pad_token_id )
        if decoder_attention_mask is None:
            snake_case : Any = decoder_input_ids.ne(config.pad_token_id )
        if head_mask is None:
            snake_case : List[Any] = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=UpperCAmelCase__ )
        if decoder_head_mask is None:
            snake_case : Tuple = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=UpperCAmelCase__ )
        if cross_attn_head_mask is None:
            snake_case : Union[str, Any] = torch.ones(
                config.num_decoder_layers , config.num_attention_heads , device=UpperCAmelCase__ )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }
    def lowerCAmelCase( self : int ):
        """Draw random encoder/decoder ids (clamped above the pad id) and
        return the config plus the prepared inputs dict."""
        snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
        snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        snake_case : List[str] = input_ids.clamp(self.pad_token_id + 1 )
        snake_case : List[str] = decoder_input_ids.clamp(self.pad_token_id + 1 )
        snake_case : str = self.get_config()
        snake_case : Tuple = config.num_attention_heads
        snake_case : List[Any] = self.prepare_inputs_dict(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
        return config, input_dict
    def lowerCAmelCase( self : Dict ):
        """Thin wrapper around prepare_config_and_inputs.

        NOTE(review): returns `config, inputs_dict` while the tuple is bound
        to `snake_case` — a renaming slip; confirm upstream."""
        snake_case , snake_case : List[str] = self.prepare_config_and_inputs()
        return config, inputs_dict
    # Tiny config with a fixed vocab of 166.
    def lowerCAmelCase( self : Dict ):
        """Build a tiny TaConfig (vocab_size=166) from the stored sizes."""
        return TaConfig(
            vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    # Tiny config using the instance's vocab size.
    def lowerCAmelCase( self : Tuple ):
        """Build a tiny TaConfig from the stored sizes (instance vocab)."""
        return TaConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def lowerCAmelCase( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Tuple , ):
        """Run forward passes and check output shapes and past-key-values
        structure (num_layers entries, 4 tensors each)."""
        snake_case : str = UMTaModel(config=UpperCAmelCase__ )
        model.to(UpperCAmelCase__ )
        model.eval()
        snake_case : str = model(
            input_ids=UpperCAmelCase__ , decoder_input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , )
        snake_case : int = model(input_ids=UpperCAmelCase__ , decoder_input_ids=UpperCAmelCase__ )
        snake_case : int = result.last_hidden_state
        snake_case : Dict = result.past_key_values
        snake_case : Dict = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
        self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(UpperCAmelCase__ ) , config.num_layers )
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0] ) , 4 )
    def lowerCAmelCase( self : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str] , ):
        """Check that decoding one extra token with a cache matches the
        equivalent uncached forward pass on a random output slice."""
        snake_case : int = UMTaModel(config=UpperCAmelCase__ ).get_decoder().to(UpperCAmelCase__ ).eval()
        # first forward pass
        snake_case : List[Any] = model(UpperCAmelCase__ , use_cache=UpperCAmelCase__ )
        snake_case : List[Any] = model(UpperCAmelCase__ )
        snake_case : Any = model(UpperCAmelCase__ , use_cache=UpperCAmelCase__ )
        self.parent.assertTrue(len(UpperCAmelCase__ ) == len(UpperCAmelCase__ ) )
        self.parent.assertTrue(len(UpperCAmelCase__ ) == len(UpperCAmelCase__ ) + 1 )
        snake_case , snake_case : List[str] = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        snake_case : Any = ids_tensor((self.batch_size, 1) , config.vocab_size )
        # append to next input_ids and
        snake_case : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
        snake_case : Any = model(UpperCAmelCase__ )['''last_hidden_state''']
        snake_case : Optional[Any] = model(UpperCAmelCase__ , past_key_values=UpperCAmelCase__ )['''last_hidden_state''']
        # select random slice
        snake_case : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        snake_case : Union[str, Any] = output_from_no_past[:, -1, random_slice_idx].detach()
        snake_case : Tuple = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-3 ) )
    def lowerCAmelCase( self : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , ):
        """Check that a half-precision forward pass produces no NaNs."""
        snake_case : int = UMTaModel(config=UpperCAmelCase__ ).to(UpperCAmelCase__ ).half().eval()
        snake_case : str = model(**UpperCAmelCase__ )['''last_hidden_state''']
        self.parent.assertFalse(torch.isnan(UpperCAmelCase__ ).any().item() )
@require_torch
class a_ ( a , a , a , unittest.TestCase ):
    """UMT5 model test case (model/generation/pipeline mixins + unittest).

    NOTE(review): all class attributes below share the name ``A__`` (each
    assignment overwrites the previous one) and all test methods are named
    ``lowerCAmelCase`` — a mechanical-rename artifact; reconcile upstream.
    """

    A__ : str = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    A__ : str = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    A__ : Any = (
        {
            'conversational': UMTaForConditionalGeneration,
            'feature-extraction': UMTaModel,
            'summarization': UMTaForConditionalGeneration,
            'text2text-generation': UMTaForConditionalGeneration,
            'translation': UMTaForConditionalGeneration,
            'question-answering': UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    A__ : Dict = True
    A__ : List[str] = False
    A__ : Optional[int] = False
    A__ : Optional[int] = True
    A__ : List[str] = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    A__ : int = [0.8, 0.9]
    def lowerCAmelCase( self : Optional[Any] ):
        """Create the tiny-model tester.

        NOTE(review): `UMTaModelTester` is not defined in this excerpt (the
        tester class above is named `a_`); confirm upstream."""
        snake_case : Union[str, Any] = UMTaModelTester(self )
    @unittest.skip('''Test has a segmentation fault on torch 1.8.0''' )
    def lowerCAmelCase( self : Optional[int] ):
        """Export the tiny model to ONNX (skipped; segfaults on torch 1.8.0)."""
        snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
        snake_case : Optional[Any] = UMTaModel(config_and_inputs[0] ).to(UpperCAmelCase__ )
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                UpperCAmelCase__ , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"{tmpdirname}/t5_test.onnx" , export_params=UpperCAmelCase__ , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , )
    @unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
    def lowerCAmelCase( self : List[Any] ):
        """Run the fp16-forward check from the model tester (GPU only)."""
        snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fpaa_forward(*UpperCAmelCase__ )
    def lowerCAmelCase( self : Tuple ):
        """Check that an all-zeros head mask zeroes out the corresponding
        attention weights during generation."""
        snake_case : Optional[int] = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions''']
        snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
        snake_case : int = config_and_inputs[0]
        snake_case : Union[str, Any] = UMTaForConditionalGeneration(UpperCAmelCase__ ).eval()
        model.to(UpperCAmelCase__ )
        snake_case : str = {
            '''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=UpperCAmelCase__ ),
            '''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCAmelCase__ ),
            '''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCAmelCase__ ),
        }
        for attn_name, (name, mask) in zip(UpperCAmelCase__ , head_masking.items() ):
            snake_case : int = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                snake_case : List[str] = torch.ones(
                    config.num_decoder_layers , config.num_heads , device=UpperCAmelCase__ )
            snake_case : Union[str, Any] = model.generate(
                config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=UpperCAmelCase__ , return_dict_in_generate=UpperCAmelCase__ , **UpperCAmelCase__ , )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            snake_case : List[str] = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
    @unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' )
    def lowerCAmelCase( self : Any ):
        """Disabled placeholder test (tiny model hits edge cases)."""
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class a_ ( unittest.TestCase ):
    """Slow integration test against the real google/umt5-small checkpoint:
    pins expected tokenizer ids and greedy-generation outputs."""

    @slow
    @unittest.skip(
        '''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' )
    def lowerCAmelCase( self : int ):
        """Tokenize sentinel-token prompts, compare against hard-coded ids,
        generate, and compare decoded strings against hard-coded outputs."""
        snake_case : Optional[Any] = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=UpperCAmelCase__ ).to(UpperCAmelCase__ )
        snake_case : int = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=UpperCAmelCase__ , legacy=UpperCAmelCase__ )
        snake_case : List[str] = [
            '''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
            '''No se como puedo <extra_id_0>.''',
            '''This is the reason why we <extra_id_0> them.''',
            '''The <extra_id_0> walks in <extra_id_1>, seats''',
            '''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
        ]
        snake_case : Dict = tokenizer(UpperCAmelCase__ , return_tensors='''pt''' , padding=UpperCAmelCase__ ).input_ids
        # fmt: off
        snake_case : Optional[Any] = torch.tensor(
            [
                [ 38_530, 210_703, 256_299, 1_410, 256_298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 826, 321, 671, 25_922, 256_299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 1_460, 339, 312, 19_014, 10_620, 758, 256_299, 2_355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
                [ 517, 256_299, 14_869, 281, 301, 256_298, 275, 119_983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 320, 256_299, 14_869, 281, 2_234, 289, 2_275, 333,61_391, 289, 256_298, 543, 256_297, 168_714, 329, 256_296,274, 1],
            ] )
        # fmt: on
        torch.testing.assert_allclose(UpperCAmelCase__ , UpperCAmelCase__ )
        snake_case : List[Any] = model.generate(input_ids.to(UpperCAmelCase__ ) )
        snake_case : int = [
            '''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
            '''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
            '''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
            '''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
            '''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
        ]
        snake_case : Tuple = tokenizer.batch_decode(UpperCAmelCase__ )
        self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
| 84 | 0 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : Any = logging.get_logger(__name__)
_a : Any = {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}
class a_ ( a ):
    """MVP model configuration (mirrors ``transformers.MvpConfig``).

    The original ``__init__`` declared every parameter as ``UpperCAmelCase__``
    (a SyntaxError); names below are reconstructed from the body's attribute
    assignments and the public MvpConfig signature, preserving the default
    values exactly.
    """

    # NOTE(review): the three class attributes below all share the name `A__`
    # (later assignments overwrite earlier ones) — upstream these are
    # presumably `model_type`, `keys_to_ignore_at_inference` and
    # `attribute_map`; kept as-is to match this file's convention.
    A__ : List[str] = 'mvp'
    A__ : Optional[int] = ['past_key_values']
    A__ : Optional[int] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__( self , vocab_size=50_267 , max_position_embeddings=1_024 , encoder_layers=12 , encoder_ffn_dim=4_096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4_096 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , activation_function="gelu" , d_model=1_024 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , classifier_dropout=0.0 , scale_embedding=False , use_cache=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , is_encoder_decoder=True , decoder_start_token_id=2 , forced_eos_token_id=2 , use_prompt=False , prompt_length=100 , prompt_mid_dim=800 , **kwargs , ):
        """Store the transformer hyperparameters and delegate token-id /
        encoder-decoder bookkeeping to the base config."""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
        # Legacy escape hatch: honour the deprecated
        # `force_bos_token_to_be_generated` kwarg and warn about it.
        if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , False ):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                '''The config can simply be saved and uploaded again to be fixed.''' )
| 709 |
import torch
from diffusers import DiffusionPipeline
class a_ ( a ):
    """Minimal pipeline that runs one UNet forward plus one scheduler step
    and returns an all-ones tensor shaped like the scheduler output."""

    # The original __init__ declared both parameters as `UpperCAmelCase__`
    # (a SyntaxError); `register_modules` names them unet and scheduler.
    def __init__( self , unet , scheduler ):
        """Register the UNet and scheduler as pipeline modules."""
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )

    def __call__( self ):
        """Run a single denoising step on random noise."""
        sample = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
        timestep = 1
        model_output = self.unet(sample , timestep ).sample
        scheduler_output = self.scheduler.step(model_output , timestep , sample ).prev_sample
        # x - x + ones_like(x) == ones_like(x); keeps the computation wired
        # through both registered modules.
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output )
        return result
| 84 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
# Pin all RNG/cuDNN sources so the pipeline tests below are reproducible.
enable_full_determinism()
class a_ ( a , a , unittest.TestCase ):
    """Fast (CPU-sized) tests for CycleDiffusionPipeline.

    NOTE(review): mechanical renaming broke this suite — several `def`s
    declare the same parameter name twice (a SyntaxError), and values are
    bound to a throwaway `snake_case` local while later lines read the
    original names (`components`, `pipe`, `images`, ...). The original
    identifiers must be restored before these tests can run.
    """

    # Pipeline under test and the shared parameter sets it accepts.
    A__ : Union[str, Any] = CycleDiffusionPipeline
    A__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        'negative_prompt',
        'height',
        'width',
        'negative_prompt_embeds',
    }
    A__ : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'latents'}
    A__ : Dict = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'source_prompt'} )
    A__ : str = IMAGE_TO_IMAGE_IMAGE_PARAMS
    A__ : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def lowerCAmelCase( self : Dict ):
        """Build tiny, seeded pipeline components (UNet, DDIM scheduler, VAE,
        CLIP text encoder/tokenizer) and return them as a dict."""
        torch.manual_seed(0 )
        snake_case : Optional[int] = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        snake_case : int = DDIMScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , num_train_timesteps=1_000 , clip_sample=UpperCAmelCase__ , set_alpha_to_one=UpperCAmelCase__ , )
        torch.manual_seed(0 )
        snake_case : Tuple = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        torch.manual_seed(0 )
        snake_case : Any = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        snake_case : List[str] = CLIPTextModel(UpperCAmelCase__ )
        snake_case : Union[str, Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        # NOTE(review): the keys below reference names (`unet`, `scheduler`, ...)
        # that the renaming no longer binds.
        snake_case : str = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components

    def lowerCAmelCase( self : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict=0 ):
        """Build a seeded kwargs dict (image, prompts, generator, step/eta
        settings) for a single pipeline call.
        NOTE(review): duplicate parameter name — SyntaxError as written."""
        snake_case : List[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase__ ) ).to(UpperCAmelCase__ )
        snake_case : str = image / 2 + 0.5  # map [-1, 1] noise into [0, 1] image range
        if str(UpperCAmelCase__ ).startswith('''mps''' ):
            # MPS does not support device-bound generators; fall back to global seed.
            snake_case : Any = torch.manual_seed(UpperCAmelCase__ )
        else:
            snake_case : Dict = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
        snake_case : Tuple = {
            '''prompt''': '''An astronaut riding an elephant''',
            '''source_prompt''': '''An astronaut riding a horse''',
            '''image''': image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''eta''': 0.1,
            '''strength''': 0.8,
            '''guidance_scale''': 3,
            '''source_guidance_scale''': 1,
            '''output_type''': '''numpy''',
        }
        return inputs

    def lowerCAmelCase( self : str ):
        """Run the pipeline end-to-end on CPU and compare a 3x3 corner slice
        of the output image against hard-coded expected values."""
        snake_case : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
        snake_case : Dict = self.get_dummy_components()
        snake_case : List[str] = CycleDiffusionPipeline(**UpperCAmelCase__ )
        snake_case : Dict = pipe.to(UpperCAmelCase__ )
        pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
        snake_case : int = self.get_dummy_inputs(UpperCAmelCase__ )
        snake_case : Optional[int] = pipe(**UpperCAmelCase__ )
        snake_case : str = output.images
        snake_case : str = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        snake_case : Optional[int] = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    @unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
    def lowerCAmelCase( self : str ):
        """Same end-to-end check as above but with all modules cast to fp16;
        only runs on CUDA."""
        snake_case : Optional[int] = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(UpperCAmelCase__ , '''half''' ):
                snake_case : List[str] = module.half()
        snake_case : List[str] = CycleDiffusionPipeline(**UpperCAmelCase__ )
        snake_case : Tuple = pipe.to(UpperCAmelCase__ )
        pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
        snake_case : Tuple = self.get_dummy_inputs(UpperCAmelCase__ )
        snake_case : Optional[int] = pipe(**UpperCAmelCase__ )
        snake_case : Optional[int] = output.images
        snake_case : List[Any] = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        snake_case : str = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    # The overrides below delegate to the shared mixin tests, skipping or
    # disabling the ones that cannot run deterministically (or on MPS).
    @skip_mps
    def lowerCAmelCase( self : Tuple ):
        """Run the shared save/load-local test (skipped on MPS)."""
        return super().test_save_load_local()

    @unittest.skip('''non-deterministic pipeline''' )
    def lowerCAmelCase( self : Optional[Any] ):
        """Disabled: batch-vs-single comparison is not deterministic here."""
        return super().test_inference_batch_single_identical()

    @skip_mps
    def lowerCAmelCase( self : Dict ):
        """Run the shared dict/tuple output-equivalence test (skipped on MPS)."""
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def lowerCAmelCase( self : int ):
        """Run the shared optional-components save/load test (skipped on MPS)."""
        return super().test_save_load_optional_components()

    @skip_mps
    def lowerCAmelCase( self : int ):
        """Run the shared attention-slicing forward-pass test (skipped on MPS)."""
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
    """Slow GPU integration tests for CycleDiffusion against real
    CompVis/stable-diffusion-v1-4 weights and reference images.

    NOTE(review): obfuscation broke the local names here too — values are
    bound to `snake_case` while later lines read `pipe`, `image`,
    `expected_image`, etc.; restore the original identifiers before running.
    """

    def lowerCAmelCase( self : int ):
        """Free GPU memory between tests.
        NOTE(review): calls super().tearDown() but is no longer *named*
        tearDown — the renaming also detached it from unittest's hook."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowerCAmelCase( self : str ):
        """fp16 end-to-end run: recolor a black car to blue and compare
        against a stored reference image (loose 5e-1 tolerance)."""
        snake_case : Any = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/cycle-diffusion/black_colored_car.png''' )
        snake_case : Tuple = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy''' )
        snake_case : List[Any] = init_image.resize((512, 512) )
        snake_case : Dict = '''CompVis/stable-diffusion-v1-4'''
        snake_case : Any = DDIMScheduler.from_pretrained(UpperCAmelCase__ , subfolder='''scheduler''' )
        snake_case : List[Any] = CycleDiffusionPipeline.from_pretrained(
            UpperCAmelCase__ , scheduler=UpperCAmelCase__ , safety_checker=UpperCAmelCase__ , torch_dtype=torch.floataa , revision='''fp16''' )
        pipe.to(UpperCAmelCase__ )
        pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
        pipe.enable_attention_slicing()
        snake_case : Tuple = '''A black colored car'''
        snake_case : str = '''A blue colored car'''
        snake_case : Union[str, Any] = torch.manual_seed(0 )
        snake_case : List[Any] = pipe(
            prompt=UpperCAmelCase__ , source_prompt=UpperCAmelCase__ , image=UpperCAmelCase__ , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=UpperCAmelCase__ , output_type='''np''' , )
        snake_case : Any = output.images
        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image ).max() < 5e-1

    def lowerCAmelCase( self : Optional[int] ):
        """fp32 variant of the recolor test with a tighter 2e-2 tolerance."""
        snake_case : Optional[int] = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/cycle-diffusion/black_colored_car.png''' )
        snake_case : Optional[int] = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy''' )
        snake_case : str = init_image.resize((512, 512) )
        snake_case : Optional[Any] = '''CompVis/stable-diffusion-v1-4'''
        snake_case : Union[str, Any] = DDIMScheduler.from_pretrained(UpperCAmelCase__ , subfolder='''scheduler''' )
        snake_case : Tuple = CycleDiffusionPipeline.from_pretrained(UpperCAmelCase__ , scheduler=UpperCAmelCase__ , safety_checker=UpperCAmelCase__ )
        pipe.to(UpperCAmelCase__ )
        pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
        pipe.enable_attention_slicing()
        snake_case : List[Any] = '''A black colored car'''
        snake_case : Tuple = '''A blue colored car'''
        snake_case : Optional[int] = torch.manual_seed(0 )
        snake_case : Tuple = pipe(
            prompt=UpperCAmelCase__ , source_prompt=UpperCAmelCase__ , image=UpperCAmelCase__ , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=UpperCAmelCase__ , output_type='''np''' , )
        snake_case : str = output.images
        assert np.abs(image - expected_image ).max() < 2e-2
| 710 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class a_ ( a ):
    """Processor bundling a CLIP image processor and a CLIP tokenizer behind
    a single `__call__`.

    Fixes vs. obfuscated original: duplicate parameter names (SyntaxError),
    `__call__` returned an undefined `encoding` local, and every method was
    named `lowerCAmelCase` so later definitions silently shadowed earlier
    ones. Names below are restored to the standard processor API.
    """

    A__ : List[str] = ['image_processor', 'tokenizer']
    A__ : Any = 'CLIPImageProcessor'
    A__ : Optional[int] = ('CLIPTokenizer', 'CLIPTokenizerFast')

    def __init__( self : Union[str, Any] , image_processor : str = None , tokenizer : Union[str, Any] = None , **kwargs : Optional[int] ):
        """Validate components, honouring the deprecated `feature_extractor` kwarg."""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )

        # Fall back to the legacy component if no image processor was given.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )

    def __call__( self : Any , text : Dict = None , images : Union[str, Any] = None , return_tensors : int = None , **kwargs : Union[str, Any] ):
        """Tokenize `text` and/or preprocess `images`; returns a single
        encoding carrying `input_ids`/`pixel_values` as appropriate."""
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            # Merge image features into the text encoding.
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )

    def batch_decode( self : List[str] , *args : Dict , **kwargs : int ):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self : Optional[int] , *args : Optional[Any] , **kwargs : str ):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self : Tuple ):
        """Union of tokenizer and image-processor input names, de-duplicated
        while preserving order."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    @property
    def feature_extractor_class( self : Tuple ):
        """Deprecated alias for `image_processor_class`."""
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
        return self.image_processor_class

    @property
    def feature_extractor( self : Any ):
        """Deprecated alias for `image_processor`."""
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , FutureWarning , )
        return self.image_processor
| 84 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a_ ( a , unittest.TestCase ):
    """Tokenizer tests for OpenAI GPT using a tiny hand-written BPE fixture.

    NOTE(review): mechanical renaming broke parts of this suite — one `def`
    below declares the same parameter name twice (a SyntaxError), and values
    bound to `snake_case` are later read under their original names
    (`tokenizer`, `tokens`, `tokenizer_r`). Restore the originals to run.
    """

    A__ : Optional[Any] = OpenAIGPTTokenizer
    A__ : List[str] = OpenAIGPTTokenizerFast
    A__ : Union[str, Any] = True
    A__ : List[Any] = False

    def lowerCAmelCase( self : List[str] ):
        """Write a minimal vocab/merges pair into the temp dir for the tests."""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        snake_case : int = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''w</w>''',
            '''r</w>''',
            '''t</w>''',
            '''lo''',
            '''low''',
            '''er</w>''',
            '''low</w>''',
            '''lowest</w>''',
            '''newer</w>''',
            '''wider</w>''',
            '''<unk>''',
        ]
        snake_case : Dict = dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) )
        snake_case : Tuple = ['''#version: 0.2''', '''l o''', '''lo w''', '''e r</w>''', '''''']
        snake_case : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        snake_case : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' ) as fp:
            fp.write(json.dumps(UpperCAmelCase__ ) )
        with open(self.merges_file , '''w''' ) as fp:
            fp.write('''\n'''.join(UpperCAmelCase__ ) )

    def lowerCAmelCase( self : Tuple , UpperCAmelCase__ : Union[str, Any] ):
        """Provide identical input/output text for the common round-trip tests."""
        return "lower newer", "lower newer"

    def lowerCAmelCase( self : str ):
        """Check tokenization of 'lower' into BPE pieces and id conversion,
        including the <unk> fallback."""
        snake_case : Union[str, Any] = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
        snake_case : List[Any] = '''lower'''
        snake_case : Tuple = ['''low''', '''er</w>''']
        snake_case : str = tokenizer.tokenize(UpperCAmelCase__ )
        self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
        snake_case : Any = tokens + ['''<unk>''']
        snake_case : int = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , UpperCAmelCase__ )

    def lowerCAmelCase( self : str , UpperCAmelCase__ : Dict=15 ):
        """Padding with 'max_length' must raise for a tokenizer without a pad
        token — checked for simple, batched, and pair inputs.
        NOTE(review): the fast tokenizer is bound to `snake_case` but used as
        `tokenizer_r` below."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                snake_case : List[Any] = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
                # Simple input
                snake_case : Union[str, Any] = '''This is a simple input'''
                snake_case : Union[str, Any] = ['''This is a simple input 1''', '''This is a simple input 2''']
                snake_case : List[Any] = ('''This is a simple input''', '''This is a pair''')
                snake_case : Dict = [
                    ('''This is a simple input 1''', '''This is a simple input 2'''),
                    ('''This is a simple pair 1''', '''This is a simple pair 2'''),
                ]
                # Simple input tests
                self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
                # Simple input
                self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
                # Simple input
                self.assertRaises(
                    UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' , )
                # Pair input
                self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
                # Pair input
                self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
                # Pair input
                self.assertRaises(
                    UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' , )

    def lowerCAmelCase( self : Any ):
        """Intentionally overridden as a no-op for this tokenizer."""
        pass
@require_ftfy
@require_spacy
@require_tokenizers
class a_ ( a ):
    # Placeholder subclass: re-runs the inherited suite when ftfy/spacy are
    # installed; no additional test bodies are defined here.
    pass
| 711 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_a : Optional[Any] = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
_a : str = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
_a : List[Any] = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. 
Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
    """`datasets` metric wrapper around the official `mauve-text` package."""

    def lowerCAmelCase( self : Any ):
        """Describe the metric: citation, string prediction/reference features,
        and reference URLs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''' , id='''sequence''' ),
                    '''references''': datasets.Value('''string''' , id='''sequence''' ),
                } ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
                '''https://arxiv.org/abs/2102.01454''',
                '''https://github.com/krishnap25/mauve''',
            ] , )

    # NOTE(review): the signature below declares the same parameter name many
    # times — a SyntaxError introduced by mechanical renaming. The originals
    # were the distinct compute_mauve kwargs (predictions, references,
    # p_features, q_features, num_buckets, ...); restore them before use.
    def lowerCAmelCase( self : int , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : List[str]="auto" , UpperCAmelCase__ : Tuple=-1 , UpperCAmelCase__ : Optional[int]=0.9 , UpperCAmelCase__ : List[Any]=5 , UpperCAmelCase__ : List[Any]=500 , UpperCAmelCase__ : Union[str, Any]="gpt2-large" , UpperCAmelCase__ : Optional[Any]=-1 , UpperCAmelCase__ : int=1_024 , UpperCAmelCase__ : List[Any]=25 , UpperCAmelCase__ : Union[str, Any]=5 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : List[Any]=25 , ):
        """Delegate scoring to `compute_mauve` and return its result object."""
        snake_case : List[str] = compute_mauve(
            p_text=UpperCAmelCase__ , q_text=UpperCAmelCase__ , p_features=UpperCAmelCase__ , q_features=UpperCAmelCase__ , p_tokens=UpperCAmelCase__ , q_tokens=UpperCAmelCase__ , num_buckets=UpperCAmelCase__ , pca_max_data=UpperCAmelCase__ , kmeans_explained_var=UpperCAmelCase__ , kmeans_num_redo=UpperCAmelCase__ , kmeans_max_iter=UpperCAmelCase__ , featurize_model_name=UpperCAmelCase__ , device_id=UpperCAmelCase__ , max_text_length=UpperCAmelCase__ , divergence_curve_discretization_size=UpperCAmelCase__ , mauve_scaling_factor=UpperCAmelCase__ , verbose=UpperCAmelCase__ , seed=UpperCAmelCase__ , )
        return out
| 84 | 0 |
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class a_ :
    """Helper that builds tiny ViTMAE configs/inputs for the fast tests.

    NOTE(review): mechanical renaming broke this class — `__init__` declares
    the same parameter name repeatedly (a SyntaxError) and values bound to
    `snake_case` are later read under their original names. The intended
    attributes are visible on the left of each assignment's original form
    (batch_size, image_size, patch_size, ...); restore them to run.
    """

    def __init__( self : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : int=13 , UpperCAmelCase__ : Dict=30 , UpperCAmelCase__ : int=2 , UpperCAmelCase__ : Optional[Any]=3 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Union[str, Any]=32 , UpperCAmelCase__ : List[str]=5 , UpperCAmelCase__ : Optional[int]=4 , UpperCAmelCase__ : List[Any]=37 , UpperCAmelCase__ : List[Any]="gelu" , UpperCAmelCase__ : str=0.1 , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : List[Any]=10 , UpperCAmelCase__ : List[str]=0.02 , UpperCAmelCase__ : str=3 , UpperCAmelCase__ : Optional[Any]=0.6 , UpperCAmelCase__ : Dict=None , ):
        """Record all model hyperparameters and derive the expected sequence length."""
        snake_case : int = parent
        snake_case : List[str] = batch_size
        snake_case : Tuple = image_size
        snake_case : List[str] = patch_size
        snake_case : Optional[Any] = num_channels
        snake_case : Optional[Any] = is_training
        snake_case : Any = use_labels
        snake_case : Union[str, Any] = hidden_size
        snake_case : Tuple = num_hidden_layers
        snake_case : Optional[int] = num_attention_heads
        snake_case : Union[str, Any] = intermediate_size
        snake_case : Union[str, Any] = hidden_act
        snake_case : int = hidden_dropout_prob
        snake_case : Any = attention_probs_dropout_prob
        snake_case : str = type_sequence_label_size
        snake_case : Optional[int] = initializer_range
        snake_case : List[Any] = mask_ratio
        snake_case : Dict = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        snake_case : Dict = (image_size // patch_size) ** 2
        snake_case : Tuple = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )

    def lowerCAmelCase( self : Tuple ):
        """Build random pixel values (and labels when enabled) plus a config."""
        snake_case : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        snake_case : int = None
        if self.use_labels:
            snake_case : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        snake_case : Tuple = self.get_config()
        return config, pixel_values, labels

    def lowerCAmelCase( self : Dict ):
        """Materialize a ViTMAEConfig from the stored hyperparameters."""
        return ViTMAEConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )

    def lowerCAmelCase( self : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any ):
        """Forward pass through ViTMAEModel; checks the hidden-state shape."""
        snake_case : str = ViTMAEModel(config=UpperCAmelCase__ )
        model.to(UpperCAmelCase__ )
        model.eval()
        snake_case : Dict = model(UpperCAmelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def lowerCAmelCase( self : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Tuple ):
        """Forward pass through ViTMAEForPreTraining; checks the logits shape
        for RGB input and for the single-channel (greyscale) case."""
        snake_case : Dict = ViTMAEForPreTraining(UpperCAmelCase__ )
        model.to(UpperCAmelCase__ )
        model.eval()
        snake_case : Any = model(UpperCAmelCase__ )
        snake_case : str = (self.image_size // self.patch_size) ** 2
        snake_case : List[Any] = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
        # test greyscale images
        snake_case : str = 1
        snake_case : Any = ViTMAEForPreTraining(UpperCAmelCase__ )
        model.to(UpperCAmelCase__ )
        model.eval()
        snake_case : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        snake_case : List[str] = model(UpperCAmelCase__ )
        snake_case : int = self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )

    def lowerCAmelCase( self : Optional[int] ):
        """Split prepared inputs into (config, inputs_dict) for the common tests.
        NOTE(review): the original tuple unpacking
        `config, pixel_values, labels = config_and_inputs` was destroyed."""
        snake_case : Optional[int] = self.prepare_config_and_inputs()
        snake_case : int = config_and_inputs
        snake_case : List[Any] = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class a_ ( a , a , unittest.TestCase ):
A__ : str = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
A__ : int = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
A__ : int = False
A__ : Optional[Any] = False
A__ : Union[str, Any] = False
A__ : List[str] = False
def lowerCAmelCase( self : Tuple ):
"""simple docstring"""
snake_case : Union[str, Any] = ViTMAEModelTester(self )
snake_case : Tuple = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37 )
def lowerCAmelCase( self : Tuple ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def lowerCAmelCase( self : Union[str, Any] ):
"""simple docstring"""
pass
def lowerCAmelCase( self : Tuple ):
"""simple docstring"""
snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : List[Any] = model_class(UpperCAmelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase__ , nn.Linear ) )
def lowerCAmelCase( self : Optional[int] ):
"""simple docstring"""
snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : Tuple = model_class(UpperCAmelCase__ )
snake_case : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case : List[str] = [*signature.parameters.keys()]
snake_case : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )
def lowerCAmelCase( self : Dict ):
"""simple docstring"""
snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def lowerCAmelCase( self : List[Any] ):
"""simple docstring"""
snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase__ )
def lowerCAmelCase( self : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple ):
"""simple docstring"""
np.random.seed(2 )
snake_case : Optional[Any] = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
snake_case : Any = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
snake_case : List[str] = torch.from_numpy(UpperCAmelCase__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
snake_case : Optional[Any] = pt_noise
super().check_pt_tf_models(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCAmelCase( self : Tuple ):
"""simple docstring"""
snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : Optional[int] = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
snake_case : List[Any] = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
snake_case : Optional[int] = outputs[0].cpu().numpy()
snake_case : List[Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase__ )
snake_case : int = model_class.from_pretrained(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
snake_case : str = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
# Make sure we don't have nans
snake_case : Union[str, Any] = after_outputs[0].cpu().numpy()
snake_case : Tuple = 0
snake_case : Optional[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(UpperCAmelCase__ , 1e-5 )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def lowerCAmelCase( self : Dict ):
"""simple docstring"""
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def lowerCAmelCase( self : str ):
"""simple docstring"""
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def lowerCAmelCase( self : Optional[int] ):
"""simple docstring"""
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def lowerCAmelCase( self : Tuple ):
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowerCAmelCase( self : Dict ):
    """Skipped pending a smaller common-test model."""
    pass
@slow
def lowerCAmelCase( self : Optional[int] ):
    """Smoke-test that the first pretrained archive checkpoint loads.

    NOTE(review): the loaded model is bound to a throwaway local; presumably
    the ``assertIsNotNone`` argument should be that model — TODO confirm.
    """
    for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        snake_case : Dict = ViTMAEModel.from_pretrained(UpperCAmelCase__ )
        self.assertIsNotNone(UpperCAmelCase__ )
def a_ ( ) -> "Image.Image":
    """Load the standard COCO fixture image used by the integration tests.

    Bug fixed: the original bound the opened image to a throwaway local and
    then returned the undefined name ``image``, raising NameError.
    """
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class a_ ( unittest.TestCase ):
    """Integration test: run the pretrained facebook/vit-mae-base on a fixture image.

    NOTE(review): most results are bound to throwaway ``snake_case`` locals,
    so later lines read names (``model``, ``vit_mae_config``, ``num_patches``,
    ``outputs``, ``expected_slice``, ``image_processor``) that are never
    defined — TODO restore the intended bindings from the upstream test.
    """

    @cached_property
    def lowerCAmelCase( self : List[str] ):
        """Default image processor, or None when vision deps are unavailable."""
        return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None

    @slow
    def lowerCAmelCase( self : List[Any] ):
        """Forward a COCO image through the pretrained model and check the logits."""
        # fix numpy's RNG so the random masking noise is reproducible
        np.random.seed(2 )
        snake_case : str = ViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' ).to(UpperCAmelCase__ )
        snake_case : Any = self.default_image_processor
        snake_case : List[Any] = prepare_img()
        snake_case : int = image_processor(images=UpperCAmelCase__ , return_tensors='''pt''' ).to(UpperCAmelCase__ )
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        snake_case : Dict = ViTMAEConfig()
        snake_case : Union[str, Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
        snake_case : Tuple = np.random.uniform(size=(1, num_patches) )
        # forward pass
        with torch.no_grad():
            snake_case : List[str] = model(**UpperCAmelCase__ , noise=torch.from_numpy(UpperCAmelCase__ ).to(device=UpperCAmelCase__ ) )
        # verify the logits
        snake_case : Optional[Any] = torch.Size((1, 196, 768) )
        self.assertEqual(outputs.logits.shape , UpperCAmelCase__ )
        snake_case : List[str] = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(UpperCAmelCase__ ) , atol=1e-4 ) )
| 712 |
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def a_ ( __magic_name__ ) -> str:
    """Translate one original ViT-MAE checkpoint key into its HF Transformers key.

    Bug fixed: every ``str.replace`` result was bound to a throwaway local and
    the function returned the undefined name ``name`` (NameError). The
    replacements are now threaded through ``name``; their order is significant
    (e.g. ``decoder_pos_embed`` must be handled before ``pos_embed`` and
    ``decoder_blocks`` before ``blocks``).
    """
    name = __magic_name__
    if "cls_token" in name:
        name = name.replace('''cls_token''' , '''vit.embeddings.cls_token''' )
    if "mask_token" in name:
        name = name.replace('''mask_token''' , '''decoder.mask_token''' )
    if "decoder_pos_embed" in name:
        name = name.replace('''decoder_pos_embed''' , '''decoder.decoder_pos_embed''' )
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace('''pos_embed''' , '''vit.embeddings.position_embeddings''' )
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''' , '''vit.embeddings.patch_embeddings.projection''' )
    if "patch_embed.norm" in name:
        name = name.replace('''patch_embed.norm''' , '''vit.embeddings.norm''' )
    if "decoder_blocks" in name:
        name = name.replace('''decoder_blocks''' , '''decoder.decoder_layers''' )
    if "blocks" in name:
        name = name.replace('''blocks''' , '''vit.encoder.layer''' )
    if "attn.proj" in name:
        name = name.replace('''attn.proj''' , '''attention.output.dense''' )
    if "attn" in name:
        name = name.replace('''attn''' , '''attention.self''' )
    if "norm1" in name:
        name = name.replace('''norm1''' , '''layernorm_before''' )
    if "norm2" in name:
        name = name.replace('''norm2''' , '''layernorm_after''' )
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''' , '''output.dense''' )
    if "decoder_embed" in name:
        name = name.replace('''decoder_embed''' , '''decoder.decoder_embed''' )
    if "decoder_norm" in name:
        name = name.replace('''decoder_norm''' , '''decoder.decoder_norm''' )
    if "decoder_pred" in name:
        name = name.replace('''decoder_pred''' , '''decoder.decoder_pred''' )
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace('''norm.weight''' , '''vit.layernorm.weight''' )
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace('''norm.bias''' , '''vit.layernorm.bias''' )
    return name
def a_ ( __magic_name__ , __magic_name__ ) -> str:
    """Split fused qkv checkpoint tensors and rebuild the state dict for HF ViT-MAE.

    NOTE(review): the signature repeats one placeholder name for both
    parameters (state dict and config) — a SyntaxError — and every sliced
    query/key/value tensor is bound to a throwaway local instead of being
    stored back under its new key, so all popped entries are lost and an
    empty dict would be returned. TODO restore the
    ``orig_state_dict[...] = ...`` targets from the upstream script.
    """
    for key in orig_state_dict.copy().keys():
        snake_case : Union[str, Any] = orig_state_dict.pop(__magic_name__ )
        if "qkv" in key:
            snake_case : Optional[int] = key.split('''.''' )
            snake_case : int = int(key_split[1] )
            if "decoder_blocks" in key:
                # decoder attention layers use the (smaller) decoder hidden size
                snake_case : List[str] = config.decoder_hidden_size
                snake_case : List[Any] = '''decoder.decoder_layers.'''
                if "weight" in key:
                    # fused qkv weight rows are stacked [query; key; value]
                    snake_case : str = val[:dim, :]
                    snake_case : Optional[Any] = val[dim : dim * 2, :]
                    snake_case : Any = val[-dim:, :]
                elif "bias" in key:
                    snake_case : Optional[Any] = val[:dim]
                    snake_case : List[Any] = val[dim : dim * 2]
                    snake_case : List[Any] = val[-dim:]
            else:
                # encoder attention layers use the main hidden size
                snake_case : Optional[int] = config.hidden_size
                snake_case : Tuple = '''vit.encoder.layer.'''
                if "weight" in key:
                    snake_case : Optional[Any] = val[:dim, :]
                    snake_case : str = val[dim : dim * 2, :]
                    snake_case : Union[str, Any] = val[-dim:, :]
                elif "bias" in key:
                    snake_case : Tuple = val[:dim]
                    snake_case : int = val[dim : dim * 2]
                    snake_case : Optional[Any] = val[-dim:]
        else:
            # non-qkv keys are re-inserted (presumably under their renamed key)
            snake_case : Optional[Any] = val
    return orig_state_dict
def a_ ( __magic_name__ , __magic_name__ ) -> Any:
    """Download an original ViT-MAE checkpoint, convert it, verify logits, and save.

    NOTE(review): both parameters (checkpoint URL, dump folder) share one
    placeholder name — a SyntaxError — and most intermediate results are bound
    to throwaway locals (``config``, ``model``, ``state_dict``,
    ``image_processor``, ``logits``, ``expected_slice`` are read but never
    defined). The image processor is also constructed twice. TODO restore the
    bindings from the upstream conversion script.
    """
    snake_case : List[str] = ViTMAEConfig()
    if "large" in checkpoint_url:
        # ViT-Large geometry
        snake_case : str = 1_024
        snake_case : Tuple = 4_096
        snake_case : Optional[Any] = 24
        snake_case : List[Any] = 16
    elif "huge" in checkpoint_url:
        # ViT-Huge geometry (patch size 14)
        snake_case : Tuple = 14
        snake_case : int = 1_280
        snake_case : Dict = 5_120
        snake_case : Tuple = 32
        snake_case : Optional[Any] = 16
    snake_case : Optional[Any] = ViTMAEForPreTraining(__magic_name__ )
    snake_case : Optional[Any] = torch.hub.load_state_dict_from_url(__magic_name__ , map_location='''cpu''' )['''model''']
    snake_case : int = ViTMAEImageProcessor(size=config.image_size )
    snake_case : Dict = convert_state_dict(__magic_name__ , __magic_name__ )
    model.load_state_dict(__magic_name__ )
    model.eval()
    snake_case : Tuple = '''https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg'''
    snake_case : List[Any] = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw )
    snake_case : Dict = ViTMAEImageProcessor(size=config.image_size )
    snake_case : str = image_processor(images=__magic_name__ , return_tensors='''pt''' )
    # forward pass
    torch.manual_seed(2 )
    snake_case : Union[str, Any] = model(**__magic_name__ )
    snake_case : Optional[Any] = outputs.logits
    if "large" in checkpoint_url:
        snake_case : Any = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]] )
    elif "huge" in checkpoint_url:
        snake_case : List[Any] = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]] )
    else:
        snake_case : Dict = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]] )
    # verify logits
    assert torch.allclose(logits[0, :3, :3] , __magic_name__ , atol=1e-4 )
    print(F"Saving model to {pytorch_dump_folder_path}" )
    model.save_pretrained(__magic_name__ )
    print(F"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(__magic_name__ )
if __name__ == "__main__":
    # CLI entry point for the conversion script.
    # NOTE(review): the parser and parsed args are bound to `_a`, so the
    # `parser` / `args` names read below are undefined, and no function named
    # `convert_vit_mae_checkpoint` exists in this file under that name —
    # TODO confirm the intended bindings against the upstream script.
    _a : Tuple = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--checkpoint_url',
        default='https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth',
        type=str,
        help='URL of the checkpoint you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    _a : str = parser.parse_args()
    convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 84 | 0 |
from __future__ import annotations
class a_ :
def __init__( self : List[str] , UpperCAmelCase__ : int ):
"""simple docstring"""
snake_case : str = order
# a_{0} ... a_{k}
snake_case : Optional[Any] = [1.0] + [0.0] * order
# b_{0} ... b_{k}
snake_case : str = [1.0] + [0.0] * order
# x[n-1] ... x[n-k]
snake_case : Union[str, Any] = [0.0] * self.order
# y[n-1] ... y[n-k]
snake_case : Tuple = [0.0] * self.order
def lowerCAmelCase( self : Optional[Any] , UpperCAmelCase__ : list[float] , UpperCAmelCase__ : list[float] ):
"""simple docstring"""
if len(UpperCAmelCase__ ) < self.order:
snake_case : Optional[Any] = [1.0, *a_coeffs]
if len(UpperCAmelCase__ ) != self.order + 1:
snake_case : int = (
F"Expected a_coeffs to have {self.order + 1} elements "
F"for {self.order}-order filter, got {len(UpperCAmelCase__ )}"
)
raise ValueError(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) != self.order + 1:
snake_case : Tuple = (
F"Expected b_coeffs to have {self.order + 1} elements "
F"for {self.order}-order filter, got {len(UpperCAmelCase__ )}"
)
raise ValueError(UpperCAmelCase__ )
snake_case : Optional[Any] = a_coeffs
snake_case : Dict = b_coeffs
def lowerCAmelCase( self : Any , UpperCAmelCase__ : float ):
"""simple docstring"""
snake_case : Optional[int] = 0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 , self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
snake_case : List[Any] = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
snake_case : Optional[Any] = self.input_history[:-1]
snake_case : int = self.output_history[:-1]
snake_case : Any = sample
snake_case : Optional[Any] = result
return result
| 713 |
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# Batch-size constants for the example (the second binding shadows the first
# because both were renamed to `_a` — presumably MAX_GPU_BATCH_SIZE and
# EVAL_BATCH_SIZE in the upstream accelerate example; TODO confirm).
_a : Optional[Any] = 16
_a : Union[str, Any] = 32
def a_ ( __magic_name__ , __magic_name__ = 16 ) -> Dict:
    """Build the GLUE/MRPC train and eval dataloaders for the accelerate example.

    NOTE(review): both parameters (the Accelerator and the batch size) share
    one placeholder name — a SyntaxError — and results are bound to throwaway
    locals, so ``tokenizer``, ``datasets``, ``accelerator``,
    ``tokenized_datasets``, ``train_dataloader``, ``eval_dataloader`` are read
    without being defined. TODO restore the upstream bindings.
    """
    snake_case : Tuple = AutoTokenizer.from_pretrained('''bert-base-cased''' )
    snake_case : Any = load_dataset('''glue''' , '''mrpc''' )

    def tokenize_function(__magic_name__ ):
        # max_length=None => use the model max length (it's actually the default)
        snake_case : Union[str, Any] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__magic_name__ , max_length=__magic_name__ )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        snake_case : Union[str, Any] = datasets.map(
            __magic_name__ , batched=__magic_name__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    snake_case : Optional[Any] = tokenized_datasets.rename_column('''label''' , '''labels''' )

    def collate_fn(__magic_name__ ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        snake_case : str = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            snake_case : Tuple = 16
        elif accelerator.mixed_precision != "no":
            snake_case : Dict = 8
        else:
            snake_case : Union[str, Any] = None
        return tokenizer.pad(
            __magic_name__ , padding='''longest''' , max_length=__magic_name__ , pad_to_multiple_of=__magic_name__ , return_tensors='''pt''' , )

    # Instantiate dataloaders.
    snake_case : str = DataLoader(
        tokenized_datasets['''train'''] , shuffle=__magic_name__ , collate_fn=__magic_name__ , batch_size=__magic_name__ )
    snake_case : List[str] = DataLoader(
        tokenized_datasets['''validation'''] , shuffle=__magic_name__ , collate_fn=__magic_name__ , batch_size=__magic_name__ )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
    # CI hook: swap in lightweight mocked dataloaders so tests never download
    # GLUE. NOTE(review): the mock was presumably meant to rebind the
    # dataloader factory, not `_a` — TODO confirm against the upstream example.
    from accelerate.test_utils.training import mocked_dataloaders

    _a : Optional[int] = mocked_dataloaders  # noqa: F811
def a_ ( __magic_name__ , __magic_name__ ) -> Optional[Any]:
    """Train/evaluate BERT on MRPC with an OOM-safe, auto-shrinking batch size.

    NOTE(review): the two parameters (config dict and CLI args) share one
    placeholder name — a SyntaxError — and most results are bound to throwaway
    locals, so ``accelerator``, ``num_epochs``, ``metric``, ``model``,
    ``optimizer``, ``lr_scheduler``, ``loss``, ``predictions``, ``references``
    and friends are read without being defined (``nonlocal accelerator`` also
    has no binding to capture). TODO restore the upstream accelerate bindings.
    """
    if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __magic_name__ ) == "1":
        snake_case : Optional[int] = 2
    # Initialize accelerator
    snake_case : int = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    snake_case : Dict = config['''lr''']
    snake_case : Any = int(config['''num_epochs'''] )
    snake_case : List[str] = int(config['''seed'''] )
    snake_case : List[Any] = int(config['''batch_size'''] )
    snake_case : Tuple = evaluate.load('''glue''' , '''mrpc''' )

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=__magic_name__ )
    def inner_training_loop(__magic_name__ ):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()
        # Then we can declare the model, optimizer, and everything else:
        set_seed(__magic_name__ )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        snake_case : str = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__magic_name__ )
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        snake_case : Optional[int] = model.to(accelerator.device )
        # Instantiate optimizer
        snake_case : Optional[int] = AdamW(params=model.parameters() , lr=__magic_name__ )
        snake_case , snake_case : List[Any] = get_dataloaders(__magic_name__ , __magic_name__ )
        # Instantiate scheduler
        snake_case : int = get_linear_schedule_with_warmup(
            optimizer=__magic_name__ , num_warmup_steps=100 , num_training_steps=(len(__magic_name__ ) * num_epochs) , )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        snake_case , snake_case , snake_case , snake_case , snake_case : Tuple = accelerator.prepare(
            __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
        # Now we train the model
        for epoch in range(__magic_name__ ):
            model.train()
            for step, batch in enumerate(__magic_name__ ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                snake_case : int = model(**__magic_name__ )
                snake_case : Optional[int] = outputs.loss
                accelerator.backward(__magic_name__ )
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(__magic_name__ ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                with torch.no_grad():
                    snake_case : List[str] = model(**__magic_name__ )
                snake_case : List[Any] = outputs.logits.argmax(dim=-1 )
                snake_case , snake_case : Dict = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
                metric.add_batch(
                    predictions=__magic_name__ , references=__magic_name__ , )
            snake_case : Tuple = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(F"epoch {epoch}:" , __magic_name__ )

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def a_ ( ) -> Union[str, Any]:
    """Parse CLI arguments and launch training with default hyper-parameters.

    NOTE(review): the parser and parsed args are bound to throwaway locals,
    so ``parser`` is undefined below, and no function exists under the name
    ``training_function`` in this file — TODO restore the upstream bindings.
    """
    snake_case : int = argparse.ArgumentParser(description='''Simple example of training script.''' )
    parser.add_argument(
        '''--mixed_precision''' , type=__magic_name__ , default=__magic_name__ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''' , )
    parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
    snake_case : Optional[Any] = parser.parse_args()
    snake_case : Optional[int] = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    training_function(__magic_name__ , __magic_name__ )
if __name__ == "__main__":
    # Script entry point. NOTE(review): no function named `main` is defined
    # in this file under that name — TODO confirm intended entry point.
    main()
| 84 | 0 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# NOTE(review): the computed path is bound to `_a`, so `git_repo_path` on the
# next line is undefined — TODO restore the intended binding.
_a : List[str] = abspath(join(dirname(__file__), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def a_ ( __magic_name__ ) -> None:
    """pytest_configure hook: register the custom markers used by the test suite.

    Bug fixed: the body referenced an undefined global ``config``; the hook's
    single parameter (the pytest config object) is now used instead.
    """
    config = __magic_name__
    config.addinivalue_line(
        '''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
    config.addinivalue_line(
        '''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
    config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' )
    config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' )
    config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' )
    config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def a_ ( __magic_name__ ) -> None:
    """pytest_addoption hook: delegate CLI-option registration to the shared helper."""
    # Imported lazily, matching the original, so transformers is only touched
    # when pytest actually invokes this hook.
    from transformers.testing_utils import pytest_addoption_shared as _register_shared_options

    _register_shared_options(__magic_name__ )
def a_ ( __magic_name__ ) -> None:
    """pytest_terminal_summary hook: emit the transformers report files when requested.

    Bugs fixed: the body read an undefined global ``terminalreporter`` instead
    of its parameter, and passed the raw parameter as the report id; the id is
    the value of the ``--make-reports`` option.
    """
    from transformers.testing_utils import pytest_terminal_summary_main

    terminalreporter = __magic_name__
    make_reports = terminalreporter.config.getoption('''--make-reports''' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
def a_ ( session , exitstatus ):
    """pytest_sessionfinish hook: treat exit status 5 ("no tests collected") as success.

    Bugs fixed: the original declared both parameters with one placeholder
    name (a SyntaxError) and stored 0 into a throwaway local; the hook must
    rewrite ``session.exitstatus``.
    """
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
# NOTE(review): both results are bound to `_a` (the second shadows the
# first), yet the class below reads the names IGNORE_RESULT and
# OutputChecker — TODO restore those bindings.
_a : List[str] = doctest.register_optionflag('IGNORE_RESULT')
_a : List[str] = doctest.OutputChecker
class a_ ( a ):
    """doctest output checker that accepts any output when IGNORE_RESULT is set."""

    def lowerCAmelCase( self , want , got , optionflags ):
        """Return True when the IGNORE_RESULT flag is set; otherwise defer to the base checker.

        Bug fixed: the original declared all three parameters under one
        placeholder name (a SyntaxError) and forwarded that placeholder to the
        base class; the standard ``check_output(want, got, optionflags)``
        signature is restored.
        """
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self , want , got , optionflags )
# Install the custom checker / doctest collection helpers.
# NOTE(review): `CustomOutputChecker` is not defined under that name in this
# file, and all three results are bound to `_a` — presumably these were meant
# to set doctest.OutputChecker and the pytest doctest module/parser hooks.
_a : List[str] = CustomOutputChecker
_a : Optional[Any] = HfDoctestModule
_a : Optional[int] = HfDocTestParser
| 714 |
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
_a : Dict = logging.get_logger(__name__)
def a_ ( box , width , height ):
    """Scale an absolute pixel box (x0, y0, x1, y1) to the 0-1000 grid LayoutLM expects.

    Bug fixed: the original declared all three parameters (box, width, height)
    under one placeholder name, which is a SyntaxError; positional call sites
    are unaffected by restoring distinct names.
    """
    return [
        int(1_000 * (box[0] / width) ),
        int(1_000 * (box[1] / height) ),
        int(1_000 * (box[2] / width) ),
        int(1_000 * (box[3] / height) ),
    ]
def a_ ( __magic_name__ , __magic_name__ , __magic_name__ = None ) -> str:
    """Run Tesseract OCR on an image and return (words, normalized bounding boxes).

    NOTE(review): all three parameters (image, OCR language, tesseract config)
    share one placeholder name — a SyntaxError — and every intermediate result
    is bound to a throwaway local, so ``pil_image``, ``data``, ``words``,
    ``left``, ``top``, ``width``, ``height``, ``irrelevant_indices`` are read
    without being defined. TODO restore the bindings from the upstream
    LayoutLM processor.
    """
    snake_case : Any = tesseract_config if tesseract_config is not None else ''''''
    # apply OCR
    snake_case : str = to_pil_image(__magic_name__ )
    snake_case , snake_case : Union[str, Any] = pil_image.size
    snake_case : List[Any] = pytesseract.image_to_data(__magic_name__ , lang=__magic_name__ , output_type='''dict''' , config=__magic_name__ )
    snake_case , snake_case , snake_case , snake_case , snake_case : Optional[Any] = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
    # filter empty words and corresponding coordinates
    snake_case : Union[str, Any] = [idx for idx, word in enumerate(__magic_name__ ) if not word.strip()]
    snake_case : Union[str, Any] = [word for idx, word in enumerate(__magic_name__ ) if idx not in irrelevant_indices]
    snake_case : Optional[Any] = [coord for idx, coord in enumerate(__magic_name__ ) if idx not in irrelevant_indices]
    snake_case : int = [coord for idx, coord in enumerate(__magic_name__ ) if idx not in irrelevant_indices]
    snake_case : int = [coord for idx, coord in enumerate(__magic_name__ ) if idx not in irrelevant_indices]
    snake_case : int = [coord for idx, coord in enumerate(__magic_name__ ) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    snake_case : List[Any] = []
    for x, y, w, h in zip(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
        snake_case : Optional[int] = [x, y, x + w, y + h]
        actual_boxes.append(__magic_name__ )
    # finally, normalize the bounding boxes
    snake_case : List[Any] = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(__magic_name__ , __magic_name__ , __magic_name__ ) )
    assert len(__magic_name__ ) == len(__magic_name__ ), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class a_ ( a ):
    """LayoutLM-style image processor: optional resize plus Tesseract OCR.

    NOTE(review): every method signature repeats the ``UpperCAmelCase__``
    placeholder for several parameters (a SyntaxError), and many results are
    bound to throwaway locals, so names such as ``size``, ``images``,
    ``words_batch``, ``boxes_batch``, ``data`` are read without being defined
    — TODO restore the parameter and attribute names from the upstream
    processor.
    """

    # names of the model inputs this processor produces
    A__ : int = ['pixel_values']

    def __init__( self : Optional[int] , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional[str] = "" , **UpperCAmelCase__ : int , ):
        """Store defaults: do_resize, size, resample, apply_ocr, ocr_lang, tesseract_config."""
        super().__init__(**UpperCAmelCase__ )
        snake_case : Any = size if size is not None else {'''height''': 224, '''width''': 224}
        snake_case : Tuple = get_size_dict(UpperCAmelCase__ )
        snake_case : Dict = do_resize
        snake_case : str = size
        snake_case : Optional[int] = resample
        snake_case : Union[str, Any] = apply_ocr
        snake_case : int = ocr_lang
        snake_case : Union[str, Any] = tesseract_config

    def lowerCAmelCase( self : List[str] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Dict[str, int] , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Any , ):
        """Resize one image to the (height, width) given in ``size``."""
        snake_case : Dict = get_size_dict(UpperCAmelCase__ )
        if "height" not in size or "width" not in size:
            raise ValueError(F"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}" )
        snake_case : Tuple = (size['''height'''], size['''width'''])
        return resize(UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )

    def lowerCAmelCase( self : Tuple , UpperCAmelCase__ : ImageInput , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : PILImageResampling = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase__ : List[Any] , ):
        """Preprocess a batch: validate, optionally run OCR, resize, flip RGB->BGR, batch."""
        snake_case : Tuple = do_resize if do_resize is not None else self.do_resize
        snake_case : List[Any] = size if size is not None else self.size
        snake_case : Tuple = get_size_dict(UpperCAmelCase__ )
        snake_case : str = resample if resample is not None else self.resample
        snake_case : Optional[int] = apply_ocr if apply_ocr is not None else self.apply_ocr
        snake_case : Any = ocr_lang if ocr_lang is not None else self.ocr_lang
        snake_case : Optional[int] = tesseract_config if tesseract_config is not None else self.tesseract_config
        snake_case : List[str] = make_list_of_images(UpperCAmelCase__ )
        if not valid_images(UpperCAmelCase__ ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )
        # All transformations expect numpy arrays.
        snake_case : Any = [to_numpy_array(UpperCAmelCase__ ) for image in images]
        if apply_ocr:
            requires_backends(self , '''pytesseract''' )
            snake_case : Optional[int] = []
            snake_case : Union[str, Any] = []
            for image in images:
                snake_case , snake_case : List[Any] = apply_tesseract(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
                words_batch.append(UpperCAmelCase__ )
                boxes_batch.append(UpperCAmelCase__ )
        if do_resize:
            snake_case : Any = [self.resize(image=UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ ) for image in images]
        # flip color channels from RGB to BGR (as Detectron2 requires this)
        snake_case : int = [flip_channel_order(UpperCAmelCase__ ) for image in images]
        snake_case : Optional[Any] = [to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__ ) for image in images]
        snake_case : List[Any] = BatchFeature(data={'''pixel_values''': images} , tensor_type=UpperCAmelCase__ )
        if apply_ocr:
            snake_case : Dict = words_batch
            snake_case : Dict = boxes_batch
        return data
| 84 | 0 |
'''simple docstring'''
from __future__ import annotations
def rec_insertion_sort(collection, n):
    """Recursively insertion-sort the first ``n`` elements of ``collection`` in place.

    Bug fixed: both functions in this pair were defined under the placeholder
    name ``a_`` while being *called* as ``rec_insertion_sort`` /
    ``insert_next`` (including by the ``__main__`` block), so every call
    raised NameError; they are restored under the names their callers use.
    """
    if len(collection) <= 1 or n <= 1:
        # Zero or one element is already sorted.
        return
    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection, index):
    """Bubble ``collection[index - 1]`` forward until local order is restored."""
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return
    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)


# Preserve the module's legacy placeholder binding (the last ``def a_`` won).
a_ = insert_next
if __name__ == "__main__":
    # Interactive demo: read integers, sort them, print the result.
    # NOTE(review): both results are bound to `_a`, so `numbers` and
    # `number_list` below are undefined — TODO restore the intended bindings.
    _a : Union[str, Any] = input('Enter integers separated by spaces: ')
    _a : list[int] = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
| 715 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class a_ :
    """Test helper that builds small AST (Audio Spectrogram Transformer) configs/inputs.

    NOTE(review): ``__init__`` repeats the ``UpperCAmelCase__`` placeholder for
    every parameter (a SyntaxError) and binds each value to a throwaway local,
    so the attributes read elsewhere (``self.batch_size``, ``self.patch_size``,
    ...) are never set — TODO restore the upstream tester's parameter names.
    """

    def __init__( self : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any]=13 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : List[Any]=24 , UpperCAmelCase__ : Union[str, Any]=16 , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : str=True , UpperCAmelCase__ : int=32 , UpperCAmelCase__ : Tuple=5 , UpperCAmelCase__ : Dict=4 , UpperCAmelCase__ : Optional[int]=37 , UpperCAmelCase__ : Optional[int]="gelu" , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Optional[int]=10 , UpperCAmelCase__ : Any=0.02 , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : List[Any]=2 , UpperCAmelCase__ : Optional[Any]=2 , ):
        """Record the model hyper-parameters and derive the expected sequence length."""
        snake_case : Tuple = parent
        snake_case : Dict = batch_size
        snake_case : str = patch_size
        snake_case : Union[str, Any] = max_length
        snake_case : str = num_mel_bins
        snake_case : Any = is_training
        snake_case : Union[str, Any] = use_labels
        snake_case : Tuple = hidden_size
        snake_case : Dict = num_hidden_layers
        snake_case : Any = num_attention_heads
        snake_case : Any = intermediate_size
        snake_case : List[Any] = hidden_act
        snake_case : str = hidden_dropout_prob
        snake_case : str = attention_probs_dropout_prob
        snake_case : str = type_sequence_label_size
        snake_case : Optional[int] = initializer_range
        snake_case : str = scope
        snake_case : int = frequency_stride
        snake_case : Union[str, Any] = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        snake_case : Optional[int] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        snake_case : Any = (self.max_length - self.patch_size) // self.time_stride + 1
        snake_case : Union[str, Any] = frequency_out_dimension * time_out_dimension
        snake_case : Union[str, Any] = num_patches + 2

    def lowerCAmelCase( self : Union[str, Any] ):
        """Create (config, input_values, labels) for one test forward pass."""
        snake_case : Optional[int] = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
        snake_case : str = None
        if self.use_labels:
            snake_case : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        snake_case : List[str] = self.get_config()
        return config, input_values, labels

    def lowerCAmelCase( self : Any ):
        """Build a small ASTConfig from the stored hyper-parameters."""
        return ASTConfig(
            patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )

    def lowerCAmelCase( self : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int] ):
        """Instantiate ASTModel, run it, and check the last hidden state shape."""
        snake_case : str = ASTModel(config=UpperCAmelCase__ )
        model.to(UpperCAmelCase__ )
        model.eval()
        snake_case : Any = model(UpperCAmelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def lowerCAmelCase( self : Optional[Any] ):
        """Return (config, inputs_dict) in the format the common test mixin expects."""
        snake_case : int = self.prepare_config_and_inputs()
        (
            (
                snake_case
            ) , (
                snake_case
            ) , (
                snake_case
            ) ,
        ) : int = config_and_inputs
        snake_case : Tuple = {'''input_values''': input_values}
        return config, inputs_dict
@require_torch
class a_ ( a , a , unittest.TestCase ):
    """Common + pipeline test-suite bindings for AST models.

    Attribute and method names are restored to the identifiers the test mixins
    look up: in the obfuscated source every class attribute was ``A__`` and
    every method ``lowerCAmelCase``, so all but the last binding of each were
    silently discarded and none of the mixin hooks could be found.

    NOTE(review): the base classes ``a`` are undefined in this file (upstream:
    ModelTesterMixin and PipelineTesterMixin) — confirm the imports.
    """

    # Model classes exercised by the shared model-tester machinery.
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    # Task -> class map used by the pipeline test mixin.
    pipeline_model_mapping = (
        {'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        """Pipeline-mixin hook: skip the audio-classification pipeline test."""
        # The body reads ``pipeline_test_casse_name`` (sic), so the first
        # parameter must carry that name; the obfuscated signature reused one
        # name for all five parameters, a SyntaxError.
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False

    def setUp(self):
        # Must be named ``setUp`` and must store attributes: the tests below
        # read ``self.model_tester`` / ``self.config_tester`` (the obfuscated
        # version bound them to throwaway locals).
        self.model_tester = ASTModelTester(self)
        # NOTE(review): config_class/has_text_modality were undefined names in
        # the obfuscated source; ASTConfig/False are the conventional values.
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='AST does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['input_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_audio() -> tuple:
    """Download a sample FLAC from the Hub and return ``(waveform, sampling_rate)``.

    Renamed from ``a_`` to match the in-file call site (``prepare_audio()`` in
    the integration test below); ``a_`` is kept as a backward-compatible alias.
    The return annotation is also fixed: this returns a tuple, not a ``Dict``.
    """
    filepath = hf_hub_download(
        repo_id='nielsr/audio-spectogram-transformer-checkpoint' , filename='sample_audio.flac' , repo_type='dataset' )
    audio, sampling_rate = torchaudio.load(filepath )

    return audio, sampling_rate


a_ = prepare_audio  # backward-compatible alias for the obfuscated name
@require_torch
@require_torchaudio
class a_ ( unittest.TestCase ):
    """Slow integration test: run the finetuned AudioSet AST checkpoint end to end."""

    @cached_property
    def default_feature_extractor(self):
        # Renamed from ``lowerCAmelCase``: the test below reads
        # ``self.default_feature_extractor``, which did not exist before.
        return (
            ASTFeatureExtractor.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593')
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        """Check the logits shape and a slice of values against reference numbers."""
        feature_extractor = self.default_feature_extractor
        # NOTE(review): ``torch_device`` is the conventional testing_utils device
        # name; the obfuscated source referenced an undefined name — confirm
        # the import exists at the top of the file.
        model = ASTForAudioClassification.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593').to(torch_device)

        feature_extractor = self.default_feature_extractor
        # ``a_`` is this module's sample-audio loader (aka ``prepare_audio``).
        audio, sampling_rate = a_()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 84 | 0 |
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class a_ :
    """Stub whose call signature mimics a model ``forward`` with contiguous
    arguments (input_ids, token_type_ids, attention_mask).

    Used by the ``ensure_valid_input`` tests below, which match arguments by
    parameter name. Distinct parameter names are restored: the obfuscated
    source reused one name for all three, which is a SyntaxError.
    """

    def lowerCAmelCase( self , input_ids , token_type_ids , attention_mask ):
        # Only the signature matters; the body is intentionally a no-op.
        return None
class a_ :
    """Stub mimicking a ``forward`` whose named inputs are non-contiguous: an
    extra ``some_other_args`` parameter sits between ``input_ids`` and the
    remaining inputs.

    Distinct parameter names are restored: the obfuscated source reused one
    name for all four, which is a SyntaxError, and ``ensure_valid_input``
    matches arguments by parameter name.
    """

    def lowerCAmelCase( self , input_ids , some_other_args , token_type_ids , attention_mask ):
        # Only the signature matters; the body is intentionally a no-op.
        return None
class a_ ( unittest.TestCase ):
    """Export/quantization tests for ``transformers.convert_graph_to_onnx``.

    Names are restored to distinct identifiers: in the obfuscated source every
    method was ``lowerCAmelCase`` (so all but the last were shadowed and
    unreachable, and the ``self._test_export`` / ``self._test_infer_dynamic_axis``
    call sites could not resolve) and the class attribute was ``A__`` even
    though the loops read ``MODEL_TO_TEST``.
    """

    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ('bert-base-cased', {}),
        ('gpt2', {'use_cache': False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        # ``self.MODEL_TO_TEST`` (not ``OnnxExportTestCase.MODEL_TO_TEST``):
        # the class itself is bound as ``a_`` in this file.
        for model, model_kwargs in self.MODEL_TO_TEST:
            self._test_export(model, 'tf', 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in self.MODEL_TO_TEST:
            self._test_export(model, 'pt', 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
        with NamedTemporaryFile(mode='w+t') as vocab_file:
            vocab_file.write('\n'.join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

            with TemporaryDirectory() as bert_save_dir:
                model = BertModel(BertConfig(vocab_size=len(vocab)))
                model.save_pretrained(bert_save_dir)
                self._test_export(bert_save_dir, 'pt', 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in self.MODEL_TO_TEST:
            path = self._test_export(model, 'tf', 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail('Quantized model is bigger than initial ONNX model')

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in self.MODEL_TO_TEST:
            path = self._test_export(model, 'pt', 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail('Quantized model is bigger than initial ONNX model')

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        """Export ``model`` to ONNX in a temporary directory and return the path.

        Duplicate parameter names (a SyntaxError) are replaced with the
        argument order the call sites above use: (model, framework, opset
        [, tokenizer]).
        """
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath('model.onnx')

                # Remove folder if exists
                if path.parent.exists():
                    path.parent.rmdir()

                # Export
                convert(framework, model, path, opset, tokenizer, **model_kwargs)

                return path
        except Exception as e:
            self.fail(e)

    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random'))
        tokenizer = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random')
        self._test_infer_dynamic_axis(model, tokenizer, 'pt')

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random'))
        tokenizer = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random')
        self._test_infer_dynamic_axis(model, tokenizer, 'tf')

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        """Run ``infer_shapes`` through a feature-extraction pipeline and check
        the reported variables and their dynamic axes."""
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: 'batch', 1: 'sequence'})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes['output_0'], {0: 'batch', 1: 'sequence'})
        self.assertDictEqual(shapes['output_1'], {0: 'batch'})

    def test_ensure_valid_input(self):
        """``ensure_valid_input`` must keep only the args the target function
        accepts, reordered to the function's own parameter order."""
        # NOTE(review): FuncContiguousArgs / FuncNonContiguousArgs refer to the
        # two stub classes defined above, which the obfuscated source renamed
        # to ``a_`` — confirm the stub class names match.
        input_names = ['input_ids', 'attention_mask', 'token_type_ids']
        tokens = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens['input_ids'])
        self.assertEqual(ordered_input_names[0], 'input_ids')

    def test_generate_identified_name(self):
        generated = generate_identified_filename(Path('/home/something/my_fake_model.onnx'), '-test')
        self.assertEqual('/home/something/my_fake_model-test.onnx', generated.as_posix())
| 716 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_a : Union[str, Any] = logging.getLogger(__name__)
def a_ ( preds , labels ):
    """Return the fraction of positions where ``preds`` equals ``labels``.

    Both arguments are numpy arrays (the caller passes ``np.argmax`` output
    and ``p.label_ids``); the result is a numpy float scalar. Duplicate
    parameter names (a SyntaxError in the obfuscated source) are replaced
    with distinct ones.
    """
    return (preds == labels).mean()


simple_accuracy = a_  # alias: the metrics callback in main() refers to ``simple_accuracy``
@dataclass
class a_ :
    """Arguments pertaining to which model/config/tokenizer we fine-tune from.

    Field names are restored to the ones ``main()`` reads off this object
    (``model_args.model_name_or_path`` etc.): the obfuscated source named
    every field ``A__`` (so only one survived) and used the undefined default
    ``a`` (which crashes at import; the conventional default is None).
    """

    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    config_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )


ModelArguments = a_  # alias matching the name ``main()`` passes to HfArgumentParser
@dataclass
class a_ :
    """Arguments pertaining to the data the model trains and evaluates on.

    Field names are restored to what ``main()`` reads (``data_args.task_name``,
    ``data_args.data_dir``, ``data_args.max_seq_length``,
    ``data_args.overwrite_cache``): the obfuscated source named every field
    ``A__`` and used the undefined default ``a`` for the bool (conventional
    default: False).
    """

    task_name: str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys() )} )
    data_dir: str = field(metadata={'help': 'Should contain the data files for the task.'} )
    max_seq_length: int = field(
        default=128 ,
        metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'help': 'Overwrite the cached training and evaluation sets'} )


DataTrainingArguments = a_  # alias matching the name ``main()`` passes to HfArgumentParser
def a_ ( ) -> Dict:
    """Fine-tune and/or evaluate a multiple-choice model; return the eval metrics dict.

    Obfuscation damage repaired: intermediate results are bound to the real
    variable names the rest of the function reads (the original bound
    everything to one throwaway name and then referenced undefined
    identifiers), and ``training_args.fpaa`` is corrected to the ``fp16``
    attribute that ``TrainingArguments`` actually defines.
    """
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            ' --overwrite_output_dir to overcome.' )

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' ,
        datefmt='%m/%d/%Y %H:%M:%S' ,
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' ,
        training_args.local_rank ,
        training_args.device ,
        training_args.n_gpu ,
        bool(training_args.local_rank != -1 ) ,
        training_args.fp16 ,  # was ``fpaa`` — TrainingArguments has no such attribute
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s' , training_args )

    # Set seed
    set_seed(training_args.seed )

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list )
    except KeyError:
        raise ValueError('Task not found: %s' % (data_args.task_name) )

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path ,
        num_labels=num_labels ,
        finetuning_task=data_args.task_name ,
        cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,
        cache_dir=model_args.cache_dir , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path ,
        from_tf=bool('.ckpt' in model_args.model_name_or_path ) ,
        config=config ,
        cache_dir=model_args.cache_dir , )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir ,
            tokenizer=tokenizer ,
            task=data_args.task_name ,
            max_seq_length=data_args.max_seq_length ,
            overwrite_cache=data_args.overwrite_cache ,
            mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir ,
            tokenizer=tokenizer ,
            task=data_args.task_name ,
            max_seq_length=data_args.max_seq_length ,
            overwrite_cache=data_args.overwrite_cache ,
            mode=Split.dev , )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        # ``simple_accuracy`` is this module's accuracy helper (defined above).
        return {"acc": simple_accuracy(preds , p.label_ids )}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model ,
        args=training_args ,
        train_dataset=train_dataset ,
        eval_dataset=eval_dataset ,
        compute_metrics=compute_metrics ,
        data_collator=data_collator , )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir )

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir , 'eval_results.txt' )
        if trainer.is_world_master():
            with open(output_eval_file , 'w' ) as writer:
                logger.info('***** Eval results *****' )
                for key, value in result.items():
                    logger.info(' %s = %s' , key , value )
                    writer.write('%s = %s\n' % (key, value) )

                results.update(result )

    return results


main = a_  # alias so the ``__main__`` guard and ``_mp_fn`` below can call ``main()``
def a_ ( __magic_name__ ) -> List[Any]:
    """TPU entry point for xla_spawn: the spawner passes a process index
    (``__magic_name__``), which is intentionally unused.

    NOTE(review): ``main`` is not defined under that name in this file — the
    training entry point above is also named ``a_``. A ``main = a_`` alias
    after that definition is required for these calls to resolve; confirm.
    """
    main()


if __name__ == "__main__":
    main()
| 84 | 0 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
_a : Union[str, Any] = logging.get_logger(__name__)
def a_ ( ) -> bool:
    """Return True iff SageMaker model parallelism is configured and available.

    Checks the SageMaker environment variables and the presence of the
    ``smdistributed`` package. Fix: the ``mpi_options.get`` fallback referenced
    an undefined name (``__magic_name__``), a NameError at runtime; the
    correct default for the missing key is ``False``. The return annotation is
    corrected to ``bool`` as well.
    """
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv('SM_HP_MP_PARAMETERS' , '{}' )
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options )
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv('SM_FRAMEWORK_PARAMS' , '{}' )
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options )
        if not mpi_options.get('sagemaker_mpi_enabled' , False ):
            return False
    except json.JSONDecodeError:
        return False

    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec('smdistributed' ) is not None


is_sagemaker_model_parallel_available = a_  # alias for the call sites below
# Initialise SageMaker model parallelism as a module-import side effect when
# the environment requests it.
# NOTE(review): ``is_sagemaker_model_parallel_available`` is not defined under
# that name in this file as written (the helper above is ``a_``); an alias is
# needed for this guard to resolve — confirm.
if is_sagemaker_model_parallel_available():
    import smdistributed.modelparallel.torch as smp

    smp.init()
@dataclass
class a_ ( a ):
    """SageMaker-specific TrainingArguments (deprecated in favour of TrainingArguments).

    NOTE(review): obfuscation damage is left in place and only documented here:
    the base class ``a`` is undefined in this file; every method is named
    ``lowerCAmelCase`` so earlier definitions are shadowed by later ones; and
    several lines reference undefined names (marked below). Confirm against
    the upstream transformers source before relying on this class.
    """

    # Raw string of model-parallelism args passed by the SageMaker launcher;
    # ignored by the trainer itself.
    A__ : str = field(
        default='' , metadata={'help': 'Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'} , )

    def lowerCAmelCase( self : Tuple ):
        """Post-init hook: run the parent's validation, then warn about deprecation."""
        super().__post_init__()
        # NOTE(review): the second positional argument below is an undefined
        # name (``UpperCAmelCase__``); upstream passes FutureWarning here.
        warnings.warn(
            '''`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use '''
            '''`TrainingArguments` instead.''' , UpperCAmelCase__ , )

    @cached_property
    def lowerCAmelCase( self : Tuple ):
        """Pick the torch device for this process.

        Branch order: CPU forced -> SageMaker model parallel -> SageMaker data
        parallel -> single process (possibly multi-GPU) -> torch.distributed.
        NOTE(review): the obfuscated body binds the device and GPU count to
        throwaway locals and then reads an undefined ``device`` at the end;
        upstream stores the device in a local it returns and the count in
        ``self._n_gpu`` — confirm.
        """
        logger.info('''PyTorch: setting up devices''' )
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                '''torch.distributed process group is initialized, but local_rank == -1. '''
                '''In order to use Torch DDP, launch your script with `python -m torch.distributed.launch''' )
        if self.no_cuda:
            # CPU explicitly requested: no GPUs.
            snake_case : int = torch.device('''cpu''' )
            snake_case : Optional[Any] = 0
        elif is_sagemaker_model_parallel_available():
            # NOTE(review): this predicate is defined as ``a_`` in this file;
            # an alias is needed for the name to resolve — confirm.
            snake_case : Union[str, Any] = smp.local_rank()
            # NOTE(review): undefined name below; upstream passes the smp local rank.
            snake_case : Union[str, Any] = torch.device('''cuda''' , UpperCAmelCase__ )
            snake_case : Dict = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp # noqa: F401

            torch.distributed.init_process_group(backend='''smddp''' , timeout=self.ddp_timeout_delta )
            snake_case : Optional[Any] = int(os.getenv('''SMDATAPARALLEL_LOCAL_RANK''' ) )
            snake_case : List[str] = torch.device('''cuda''' , self.local_rank )
            snake_case : List[Any] = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            snake_case : List[str] = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            snake_case : List[str] = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend='''nccl''' , timeout=self.ddp_timeout_delta )
            snake_case : Union[str, Any] = torch.device('''cuda''' , self.local_rank )
            snake_case : int = 1

        if device.type == "cuda":
            # NOTE(review): ``device`` and the set_device argument are undefined
            # names here (see the throwaway locals above) — confirm upstream.
            torch.cuda.set_device(UpperCAmelCase__ )

        return device

    @property
    def lowerCAmelCase( self : List[Any] ):
        """World size: smp's data-parallel size under model parallelism, else the parent's."""
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()

        return super().world_size

    @property
    def lowerCAmelCase( self : Tuple ):
        """Upstream ``place_model_on_device``: False under SageMaker model parallelism."""
        return not is_sagemaker_model_parallel_available()

    @property
    def lowerCAmelCase( self : Optional[Any] ):
        """Upstream ``_no_sync_in_gradient_accumulation``: always False here."""
        return False
| 717 |
import re
def a_ ( __magic_name__ ) -> bool:
    """Validate a Sri Lankan mobile phone number.

    Accepts prefixes 0 / 94 / +94 / 0094 followed by ``7``, a carrier digit in
    {0,1,2,4,5,6,7,8}, an optional single ``-`` or space separator, and seven
    digits. Fix: the compiled pattern was never used — ``re.search`` was called
    with the input string as both pattern and subject.
    """
    pattern = re.compile(
        r'^(?:0|94|\+94|0{2}94)' r'7(0|1|2|4|5|6|7|8)' r'(-| |)' r'\d{7}$' )
    return bool(pattern.search(__magic_name__ ) )
if __name__ == "__main__":
    # Demo run: validate a sample number and print the result.
    # Fixed NameErrors: the validator in this module is ``a_`` and the sample
    # value is ``_a`` (the original called undefined ``is_sri_lankan_phone_number``
    # with an undefined ``phone``). The module-level ``Any`` annotation is
    # dropped: ``Any`` is not imported here and module-level annotations are
    # evaluated.
    _a = '0094702343221'
    print(a_(_a ) )
| 84 | 0 |
from sklearn.metrics import mean_squared_error
import datasets
_a : Optional[int] = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
_a : List[str] = '\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n'
_a : Tuple = '\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n "raw_values" : Returns a full set of errors in case of multioutput input.\n\n "uniform_average" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric("mse")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {\'mse\': 0.6123724356957945}\n\n If you\'re using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric("mse", "multilist")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mse\': array([0.41666667, 1. ])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
    """Mean-squared-error metric wrapping sklearn's ``mean_squared_error``.

    Method names are restored to the ``datasets.Metric`` hooks (``_info`` /
    ``_compute``) and the private helper ``_get_feature_types``: the obfuscated
    source named every method ``lowerCAmelCase``, so the hooks shadowed each
    other and ``self._get_feature_types()`` did not resolve.
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(self._get_feature_types() ) ,
            reference_urls=[
                'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'
            ] , )

    def _get_feature_types(self):
        # The "multilist" config accepts 2-D inputs; the default config
        # accepts flat float sequences.
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value('float' ) ),
                "references": datasets.Sequence(datasets.Value('float' ) ),
            }
        else:
            return {
                "predictions": datasets.Value('float' ),
                "references": datasets.Value('float' ),
            }

    def _compute(self , predictions , references , sample_weight=None , multioutput="uniform_average" , squared=True ):
        """Compute the (root) mean squared error of ``predictions`` vs ``references``.

        Duplicate parameter names (a SyntaxError) replaced with the argument
        names documented in ``_KWARGS_DESCRIPTION``. sklearn's signature is
        ``(y_true, y_pred)``, so references come first.
        """
        mse = mean_squared_error(
            references , predictions , sample_weight=sample_weight , multioutput=multioutput , squared=squared )
        return {"mse": mse}
| 718 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class a_ ( unittest.TestCase ):
    """Holds the hyper-parameters used to build the LayoutLMv3 image-processor tests.

    Fixes: the obfuscated ``__init__`` named every parameter
    ``UpperCAmelCase__`` (duplicate argument names are a SyntaxError) and bound
    the values to throwaway locals instead of attributes, even though the
    other methods read ``self.batch_size`` etc. ``prepare_image_processor_dict``
    is restored as the method name the test class calls.
    """

    def __init__(
        self ,
        parent ,
        batch_size=7 ,
        num_channels=3 ,
        image_size=18 ,
        min_resolution=30 ,
        max_resolution=400 ,
        do_resize=True ,
        size=None ,
        apply_ocr=True ,
    ):
        # ``size`` uses a None sentinel to avoid a mutable default argument.
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the image processor under test."""
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class a_ ( a , unittest.TestCase ):
A__ : List[Any] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def lowerCAmelCase( self : Dict ):
"""simple docstring"""
snake_case : Optional[Any] = LayoutLMvaImageProcessingTester(self )
@property
def lowerCAmelCase( self : Dict ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase( self : List[Any] ):
"""simple docstring"""
snake_case : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase__ , '''do_resize''' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , '''size''' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , '''apply_ocr''' ) )
def lowerCAmelCase( self : Optional[int] ):
"""simple docstring"""
snake_case : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
snake_case : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
    def lowerCAmelCase( self : str ):
        """Intentionally empty placeholder.

        NOTE(review): like the other methods of this class it shares the
        obfuscated name ``lowerCAmelCase``, so earlier same-named definitions
        are shadowed — confirm the intended name upstream.
        """
        pass
def lowerCAmelCase( self : Dict ):
"""simple docstring"""
# Initialize image_processing
snake_case : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , Image.Image )
# Test not batched input
snake_case : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
self.assertIsInstance(encoding.words , UpperCAmelCase__ )
self.assertIsInstance(encoding.boxes , UpperCAmelCase__ )
# Test batched
snake_case : Dict = image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def lowerCAmelCase( self : Union[str, Any] ):
"""simple docstring"""
# Initialize image_processing
snake_case : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , np.ndarray )
# Test not batched input
snake_case : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
snake_case : List[Any] = image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def lowerCAmelCase( self : Optional[Any] ):
"""simple docstring"""
# Initialize image_processing
snake_case : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )
# Test not batched input
snake_case : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
snake_case : Tuple = image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def lowerCAmelCase( self : Optional[Any] ):
"""simple docstring"""
# with apply_OCR = True
snake_case : int = LayoutLMvaImageProcessor()
from datasets import load_dataset
snake_case : List[Any] = load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' )
snake_case : List[Any] = Image.open(ds[0]['''file'''] ).convert('''RGB''' )
snake_case : Any = image_processing(UpperCAmelCase__ , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
snake_case : Optional[Any] = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', 
'''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
snake_case : Union[str, Any] = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 
447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , UpperCAmelCase__ )
self.assertListEqual(encoding.boxes , UpperCAmelCase__ )
# with apply_OCR = False
snake_case : str = LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase__ )
snake_case : Optional[Any] = image_processing(UpperCAmelCase__ , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 84 | 0 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
# BUG FIX (review): the logger, the download URL prefix and the checkpoint map were
# all assigned to the same obfuscated name ``_a``, so the later references to
# ``PREFIX`` and ``MODEL_MAPPING`` in the conversion functions raised NameError.
# The names the rest of the module actually reads are restored here.
logger = logging.get_logger(__name__)

# Base URL of the publicly hosted OpenAI Jukebox checkpoints.
PREFIX = 'https://openaipublic.azureedge.net/jukebox/models/'

# Checkpoint shards (vqvae + the three priors) needed for each supported model.
MODEL_MAPPING = {
    'jukebox-1b-lyrics': [
        '5b/vqvae.pth.tar',
        '5b/prior_level_0.pth.tar',
        '5b/prior_level_1.pth.tar',
        '1b_lyrics/prior_level_2.pth.tar',
    ],
    'jukebox-5b-lyrics': [
        '5b/vqvae.pth.tar',
        '5b/prior_level_0.pth.tar',
        '5b/prior_level_1.pth.tar',
        '5b_lyrics/prior_level_2.pth.tar',
    ],
}
def a_ ( __magic_name__ ) -> str:
    """Translate one OpenAI Jukebox checkpoint key into the HF Transformers naming.

    Applies a sequence of suffix/substring rewrites (conv layers, codebooks,
    embeddings, layer norms, projections). The first match of the ``return``-ing
    checks wins; otherwise the cumulatively rewritten key is returned.

    BUG FIX: the body referenced an undefined name ``key`` instead of the
    parameter, and the return annotation claimed ``int`` for a string result.

    Args:
        __magic_name__: original checkpoint key.

    Returns:
        The converted key (possibly unchanged).
    """
    key = __magic_name__
    # conv1d layers inside resnet blocks: .model.1 -> conv1d_1, .model.3 -> conv1d_2
    # (the length guard restricts this to deeply nested keys)
    if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 10:
        key = key.replace('''.model.1.bias''' , '''.conv1d_1.bias''' )
    elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 10:
        key = key.replace('''.model.1.weight''' , '''.conv1d_1.weight''' )
    elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 10:
        key = key.replace('''.model.3.bias''' , '''.conv1d_2.bias''' )
    elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 10:
        key = key.replace('''.model.3.weight''' , '''.conv1d_2.weight''' )

    if "conditioner_blocks.0." in key:
        key = key.replace('''conditioner_blocks.0''' , '''conditioner_blocks''' )

    if "prime_prior" in key:
        key = key.replace('''prime_prior''' , '''encoder''' )

    # generic embeddings lose the ``.emb.`` infix (but not the total/absolute/relative
    # positional embeddings)
    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace('''.emb.''' , '''.''' )

    if key.endswith('''k''' ):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace('''.k''' , '''.codebook''' )
    if "y_emb." in key:
        return key.replace('''y_emb.''' , '''metadata_embedding.''' )

    if "x_emb.emb." in key:
        key = key.replace('''0.x_emb.emb''' , '''embed_tokens''' )

    if "prime_state_ln" in key:
        return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' )
    if ".ln" in key:
        return key.replace('''.ln''' , '''.layer_norm''' )
    if "_ln" in key:
        return key.replace('''_ln''' , '''_layer_norm''' )

    if "prime_state_proj" in key:
        return key.replace('''prime_state_proj''' , '''encoder.proj_in''' )
    if "prime_x_out" in key:
        return key.replace('''prime_x_out''' , '''encoder.lm_head''' )
    if "prior.x_out" in key:
        return key.replace('''x_out''' , '''fc_proj_out''' )
    if "x_emb" in key:
        return key.replace('''x_emb''' , '''embed_tokens''' )

    return key
def a_ ( state_dict , model_state_dict , key_prefix , mapping ) -> dict:
    """Rename all keys of an original Jukebox ``state_dict`` to HF naming.

    Each key is first rewritten by one of nine regex-based structural rules
    (encoder/decoder/conditioner conv, resnet and projection layers), then passed
    through the global key renamer, and finally validated against the target
    model's state dict (existence + shape).

    BUG FIX (review): the four parameters were all named ``__magic_name__``
    (a SyntaxError as written) and every assignment target had been clobbered to
    ``snake_case``; the names are restored from their uses in the body
    (``state_dict.items()``, ``model_state_dict[...]``, ``key_prefix``, ``mapping``).

    Args:
        state_dict: original checkpoint weights (old key -> tensor).
        model_state_dict: state dict of the freshly built HF model, used to check
            that each converted key exists and that shapes agree.
        key_prefix: prefix the converted keys live under in the HF model
            (``"vqvae"`` or ``"priors.N"``).
        mapping: dict filled in-place with converted-key -> original-key.

    Returns:
        New state dict keyed by the converted names.
    """
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
    re_encoder_block_resnet = re.compile(
        R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
    re_encoder_block_proj_out = re.compile(R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )

    re_decoder_block_conv_out = re.compile(R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
    re_decoder_block_resnet = re.compile(
        R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
    re_decoder_block_proj_in = re.compile(R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )

    re_prior_cond_conv_out = re.compile(R'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''' )
    re_prior_cond_resnet = re.compile(
        R'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
    re_prior_cond_proj_in = re.compile(R'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''' )

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key ):
            regex_match = re_encoder_block_conv_in.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] )
            re_new_key = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key , original_key )
        elif re_encoder_block_resnet.fullmatch(original_key ):
            regex_match = re_encoder_block_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] )
            conv_index = {'''1''': 1, '''3''': 2}[groups[-2]]
            prefix = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key , original_key )
        elif re_encoder_block_proj_out.fullmatch(original_key ):
            regex_match = re_encoder_block_proj_out.match(original_key )
            groups = regex_match.groups()
            re_new_key = F"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key , original_key )
        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key ):
            regex_match = re_decoder_block_conv_out.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] ) - 2
            re_new_key = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key , original_key )
        elif re_decoder_block_resnet.fullmatch(original_key ):
            regex_match = re_decoder_block_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] ) - 2
            conv_index = {'''1''': 1, '''3''': 2}[groups[-2]]
            prefix = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key , original_key )
        elif re_decoder_block_proj_in.fullmatch(original_key ):
            regex_match = re_decoder_block_proj_in.match(original_key )
            groups = regex_match.groups()
            re_new_key = F"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key , original_key )
        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key ):
            regex_match = re_prior_cond_conv_out.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[1] ) * 2 + int(groups[2] ) - 2
            re_new_key = F"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key , original_key )
        elif re_prior_cond_resnet.fullmatch(original_key ):
            regex_match = re_prior_cond_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[1] ) * 2 + int(groups[2] ) - 2
            conv_index = {'''1''': 1, '''3''': 2}[groups[-2]]
            prefix = F"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key , original_key )
        elif re_prior_cond_proj_in.fullmatch(original_key ):
            regex_match = re_prior_cond_proj_in.match(original_key )
            groups = regex_match.groups()
            re_new_key = F"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key , original_key )
        # keep original key
        else:
            key = original_key

        # NOTE(review): ``replace_key`` is the single-key renamer defined above under
        # the obfuscated name ``a_``; the name ``replace_key`` is undefined in this
        # file as written — restore the helper's real name at module level.
        key = replace_key(key )

        if F"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(F"failed converting {original_key} to {key}, does not match" )
        # handle missmatched shape
        elif value.shape != model_state_dict[F"{key_prefix}.{key}"].shape:
            val = model_state_dict[F"{key_prefix}.{key}"]
            print(F"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match" )
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def a_ ( model_name=None , pytorch_dump_folder_path=None ) -> list:
    """Download the original Jukebox checkpoint shards, convert their keys to the HF
    layout, load them into a :class:`JukeboxModel` and save the converted model.

    BUG FIX (review): both parameters were named ``__magic_name__`` (a SyntaxError)
    and the local assignment targets had been clobbered to ``snake_case``; names
    restored from their uses. The per-key renames inside the inner loop were lost
    entirely to the clobbering and are restored from the reference conversion
    script — confirm against a real checkpoint.

    Args:
        model_name: key into ``MODEL_MAPPING`` (e.g. ``"jukebox-5b-lyrics"``).
        pytorch_dump_folder_path: directory for downloads and the converted model.

    Returns:
        The remaining per-prior weight dicts (after the vqvae dict is popped).
    """
    # fetch any missing checkpoint shards into the dump folder
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" ):
            r = requests.get(F"{PREFIX}{file}" , allow_redirects=True )
            os.makedirs(F"{pytorch_dump_folder_path}/" , exist_ok=True )
            open(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" , '''wb''' ).write(r.content )

    model_to_convert = MODEL_MAPPING[model_name.split('''/''' )[-1]]
    config = JukeboxConfig.from_pretrained(model_name )
    model = JukeboxModel(config )
    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert ):
        old_dic = torch.load(F"{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}" )['''model''']
        new_dic = {}
        for k in old_dic.keys():
            if k.endswith('''.b''' ):
                new_dic[k.replace('''b''' , '''bias''' )] = old_dic[k]
            elif k.endswith('''.w''' ):
                new_dic[k.replace('''w''' , '''weight''' )] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace('''.blocks.''' , '''.model.''' )] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]
        # shard 0 is the vqvae; shards 1..3 are the priors in reverse level order
        key_prefix = '''vqvae''' if i == 0 else F"priors.{3 - i}"
        # NOTE(review): ``fix_jukebox_keys`` is defined above under the obfuscated
        # name ``a_``; restore the helper's real name at module level.
        new_dic = fix_jukebox_keys(new_dic , model.state_dict() , key_prefix , mapping )
        weight_dict.append(new_dic )

    vqvae_state_dict = weight_dict.pop(0 )
    model.vqvae.load_state_dict(vqvae_state_dict )
    for i in range(len(weight_dict ) ):
        model.priors[i].load_state_dict(weight_dict[2 - i] )

    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    with open(F"{pytorch_dump_folder_path}/mapping.json" , '''w''' ) as txtfile:
        json.dump(mapping , txtfile )
    print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    return weight_dict
if __name__ == "__main__":
    # BUG FIX (review): the parser and the parsed namespace were both assigned to the
    # obfuscated name ``_a`` while the following lines read ``parser`` / ``args``
    # (NameError), and the final call targeted the undefined name
    # ``convert_openai_checkpoint`` — the conversion entry point is defined above
    # as ``a_``.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default='jukebox-5b-lyrics',
        type=str,
        help='Name of the model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='jukebox-5b-lyrics-converted',
        type=str,
        help='Path to the output PyTorch model directory.',
    )
    args = parser.parse_args()
    a_(args.model_name, args.pytorch_dump_folder_path)
| 719 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_a : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
_a : Dict = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... 
prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def a_ ( height , width , scale_factor=8 ) -> tuple:
    """Round (height, width) to the nearest sizes compatible with the latent grid.

    Each dimension is divided by ``scale_factor**2`` (rounded up), giving the
    latent size, which is then multiplied back by ``scale_factor``.

    BUG FIX: all three parameters were named ``__magic_name__`` (a SyntaxError) and
    the two local targets had been clobbered to ``snake_case`` while the body read
    ``new_height`` / ``new_width``; proper names restored from their uses.

    Args:
        height: requested image height in pixels.
        width: requested image width in pixels.
        scale_factor: down/up-scaling factor of the latent model (default 8).

    Returns:
        ``(latent_height * scale_factor, latent_width * scale_factor)``.
    """
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1  # round up for non-divisible heights
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1  # round up for non-divisible widths
    return new_height * scale_factor, new_width * scale_factor
class a_ ( a ):
    """Kandinsky-2.2-style controlnet decoder pipeline.

    Denoises MoVQ latents with a UNet conditioned on CLIP image embeddings and a
    controlnet ``hint`` tensor, then decodes the final latents to images.

    NOTE(review): obfuscation damage is left byte-identical here (comments only):
    the base class is the bare name ``a`` (the imports suggest
    ``DiffusionPipeline`` — confirm), several ``def`` signatures repeat the
    parameter name ``UpperCAmelCase__`` (a SyntaxError as written), and local
    assignment targets were clobbered to ``snake_case`` while later lines still
    read the intended names (``latents``, ``models``, ``hook``, ...).
    """

    # NOTE(review): duplicated parameter names — originally distinct
    # (unet, scheduler, movq), matching the register_modules call below.
    def __init__( self : Optional[int] , UpperCAmelCase__ : UNetaDConditionModel , UpperCAmelCase__ : DDPMScheduler , UpperCAmelCase__ : VQModel , ):
        """Register the denoising UNet, the DDPM scheduler and the MoVQ decoder."""
        super().__init__()
        self.register_modules(
            unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ , movq=UpperCAmelCase__ , )
        # Spatial scale factor implied by the MoVQ block hierarchy; later code
        # reads ``self.movq_scale_factor`` (the target name was lost to obfuscation).
        snake_case : List[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)

    # NOTE(review): duplicated parameter names — originally
    # (shape, dtype, device, generator, latents, scheduler); the body reads
    # ``latents``, ``shape`` and ``scheduler`` directly.
    def lowerCAmelCase( self : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any ):
        """Create fresh noise latents — or validate caller-supplied ones — and
        pre-scale them by the scheduler's ``init_noise_sigma``."""
        if latents is None:
            snake_case : int = randn_tensor(UpperCAmelCase__ , generator=UpperCAmelCase__ , device=UpperCAmelCase__ , dtype=UpperCAmelCase__ )
        else:
            if latents.shape != shape:
                raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}" )
            snake_case : Optional[Any] = latents.to(UpperCAmelCase__ )
        # pre-scale so the first scheduler step sees correctly scaled noise
        snake_case : List[Any] = latents * scheduler.init_noise_sigma
        return latents

    def lowerCAmelCase( self : Dict , UpperCAmelCase__ : Optional[int]=0 ):
        """Offload the heavy sub-models (unet, movq) to CPU via accelerate's
        ``cpu_offload`` so their weights are streamed to ``cuda:<gpu_id>`` on use.
        NOTE(review): the device target was clobbered to ``snake_case``; the loop
        reads ``models``."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('''Please install accelerate via `pip install accelerate`''' )
        snake_case : Union[str, Any] = torch.device(F"cuda:{gpu_id}" )
        snake_case : Dict = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(UpperCAmelCase__ , UpperCAmelCase__ )

    def lowerCAmelCase( self : List[Any] , UpperCAmelCase__ : Any=0 ):
        """Hook-based model offload (accelerate >= 0.17): keeps one sub-model at a
        time on GPU, returning the previous one to CPU after its forward pass."""
        if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
        snake_case : Optional[int] = torch.device(F"cuda:{gpu_id}" )
        if self.device.type != "cpu":
            self.to('''cpu''' , silence_dtype_warnings=UpperCAmelCase__ )
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        snake_case : List[str] = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            snake_case , snake_case : Optional[int] = cpu_offload_with_hook(UpperCAmelCase__ , UpperCAmelCase__ , prev_module_hook=UpperCAmelCase__ )
        # We'll offload the last model manually.
        snake_case : Tuple = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def lowerCAmelCase( self : Union[str, Any] ):
        """Device on which the UNet actually executes (honours accelerate hooks)."""
        if not hasattr(self.unet , '''_hf_hook''' ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(UpperCAmelCase__ , '''_hf_hook''' )
                and hasattr(module._hf_hook , '''execution_device''' )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device

    # NOTE(review): duplicated parameter names — originally (image_embeds,
    # negative_image_embeds, hint, height, width, num_inference_steps,
    # guidance_scale, num_images_per_prompt, generator, latents, output_type,
    # return_dict); the decorator argument was the module-level example docstring.
    @torch.no_grad()
    @replace_example_docstring(UpperCAmelCase__ )
    def __call__( self : List[str] , UpperCAmelCase__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCAmelCase__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCAmelCase__ : torch.FloatTensor , UpperCAmelCase__ : int = 512 , UpperCAmelCase__ : int = 512 , UpperCAmelCase__ : int = 100 , UpperCAmelCase__ : float = 4.0 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase__ : Optional[torch.FloatTensor] = None , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , ):
        """Run the full denoising loop: prepare embeddings/hint (doubling the batch
        for classifier-free guidance), iterate the scheduler timesteps through the
        UNet, decode the latents with MoVQ and post-process to the requested
        ``output_type`` (``pt``/``np``/``pil``)."""
        snake_case : Optional[int] = self._execution_device
        # classifier-free guidance doubles the batch (uncond + cond halves)
        snake_case : Union[str, Any] = guidance_scale > 1.0
        if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
            snake_case : Any = torch.cat(UpperCAmelCase__ , dim=0 )
        if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
            snake_case : Union[str, Any] = torch.cat(UpperCAmelCase__ , dim=0 )
        if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
            snake_case : int = torch.cat(UpperCAmelCase__ , dim=0 )
        snake_case : List[Any] = image_embeds.shape[0] * num_images_per_prompt
        if do_classifier_free_guidance:
            snake_case : Dict = image_embeds.repeat_interleave(UpperCAmelCase__ , dim=0 )
            snake_case : Optional[Any] = negative_image_embeds.repeat_interleave(UpperCAmelCase__ , dim=0 )
            snake_case : Tuple = hint.repeat_interleave(UpperCAmelCase__ , dim=0 )
            snake_case : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCAmelCase__ )
            snake_case : List[str] = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCAmelCase__ )
        self.scheduler.set_timesteps(UpperCAmelCase__ , device=UpperCAmelCase__ )
        snake_case : str = self.scheduler.timesteps
        snake_case : Optional[Any] = self.movq.config.latent_channels
        snake_case , snake_case : Optional[Any] = downscale_height_and_width(UpperCAmelCase__ , UpperCAmelCase__ , self.movq_scale_factor )
        # create initial latent
        snake_case : Dict = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , self.scheduler , )
        for i, t in enumerate(self.progress_bar(UpperCAmelCase__ ) ):
            # expand the latents if we are doing classifier free guidance
            snake_case : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            snake_case : Optional[int] = {'''image_embeds''': image_embeds, '''hint''': hint}
            snake_case : Any = self.unet(
                sample=UpperCAmelCase__ , timestep=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , added_cond_kwargs=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , )[0]
            if do_classifier_free_guidance:
                # the UNet output stacks noise and (learned) variance on the channel dim
                snake_case , snake_case : Dict = noise_pred.split(latents.shape[1] , dim=1 )
                snake_case , snake_case : Any = noise_pred.chunk(2 )
                snake_case , snake_case : Dict = variance_pred.chunk(2 )
                snake_case : str = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                snake_case : List[str] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , '''variance_type''' )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                snake_case , snake_case : Tuple = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            snake_case : List[Any] = self.scheduler.step(
                UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , generator=UpperCAmelCase__ , )[0]
        # post-processing
        snake_case : List[Any] = self.movq.decode(UpperCAmelCase__ , force_not_quantize=UpperCAmelCase__ )['''sample''']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
        if output_type in ["np", "pil"]:
            # map [-1, 1] -> [0, 1] and move to HWC numpy layout
            snake_case : Optional[Any] = image * 0.5 + 0.5
            snake_case : int = image.clamp(0 , 1 )
            snake_case : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            snake_case : str = self.numpy_to_pil(UpperCAmelCase__ )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=UpperCAmelCase__ )
| 84 | 0 |
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 720 |
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_a : List[Any] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class a_ ( a , unittest.TestCase ):
A__ : Dict = ReformerTokenizer
A__ : Optional[int] = ReformerTokenizerFast
A__ : str = True
A__ : Tuple = False
A__ : str = True
def lowerCAmelCase( self : List[Any] ):
"""simple docstring"""
super().setUp()
snake_case : str = ReformerTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase( self : Any ):
"""simple docstring"""
snake_case : int = '''<s>'''
snake_case : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ )
def lowerCAmelCase( self : Optional[Any] ):
"""simple docstring"""
snake_case : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(UpperCAmelCase__ ) , 1_000 )
def lowerCAmelCase( self : List[Any] ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
def lowerCAmelCase( self : Dict ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
snake_case : Any = self.get_tokenizer()
snake_case : str = self.get_rust_tokenizer()
snake_case : Tuple = '''I was born in 92000, and this is falsé.'''
snake_case : str = tokenizer.tokenize(UpperCAmelCase__ )
snake_case : int = rust_tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
snake_case : Union[str, Any] = tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
snake_case : List[str] = rust_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
snake_case : List[str] = self.get_rust_tokenizer()
snake_case : Optional[int] = tokenizer.encode(UpperCAmelCase__ )
snake_case : Optional[Any] = rust_tokenizer.encode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCAmelCase( self : Dict , UpperCAmelCase__ : List[Any]=15 ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case : str = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
# Simple input
snake_case : Union[str, Any] = '''This is a simple input'''
snake_case : List[str] = ['''This is a simple input 1''', '''This is a simple input 2''']
snake_case : int = ('''This is a simple input''', '''This is a pair''')
snake_case : int = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Simple input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Simple input
self.assertRaises(
UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' , )
# Pair input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Pair input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Pair input
self.assertRaises(
UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' , )
    def lowerCAmelCase( self : str ):
        """Intentional no-op override: this mixin check is skipped for Reformer."""
        pass
    def lowerCAmelCase( self : Union[str, Any] ):
        """SentencePiece round-trip: tokenize reference sentences, check the
        piece lists, token->id conversion (<unk> -> 0), and id->token back.

        NOTE(review): assignment targets here were obfuscated to `snake_case`
        while later lines pass the undefined `UpperCAmelCase__`, and the
        constructor's sample-vocab path was lost — restore the original
        locals from upstream before running.
        """
        snake_case : Union[str, Any] = ReformerTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ )

        snake_case : List[str] = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [285, 46, 10, 170, 382] , )

        # Accented / numeric input: digits and accented chars split to pieces.
        snake_case : int = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            UpperCAmelCase__ , [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''9''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''é''',
                '''.''',
            ] , )
        # Out-of-vocab pieces ('9', 'é') map to id 0.
        snake_case : int = tokenizer.convert_tokens_to_ids(UpperCAmelCase__ )
        self.assertListEqual(
            UpperCAmelCase__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )

        # Converting back turns the unknown ids into the literal '<unk>' piece.
        snake_case : List[Any] = tokenizer.convert_ids_to_tokens(UpperCAmelCase__ )
        self.assertListEqual(
            UpperCAmelCase__ , [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''<unk>''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''<unk>''',
                '''.''',
            ] , )
    @cached_property
    def lowerCAmelCase( self : Tuple ):
        """Pretrained crime-and-punishment tokenizer, built once per instance.

        NOTE(review): other tests read ``self.big_tokenizer``; this property's
        obfuscated name no longer matches that attribute — confirm upstream.
        """
        return ReformerTokenizer.from_pretrained('''google/reformer-crime-and-punishment''' )
@slow
def lowerCAmelCase( self : List[str] ):
"""simple docstring"""
snake_case : Any = '''Hello World!'''
snake_case : Optional[Any] = [126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) )
@slow
def lowerCAmelCase( self : Optional[Any] ):
"""simple docstring"""
snake_case : Optional[Any] = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
snake_case : Dict = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) )
@require_torch
@slow
def lowerCAmelCase( self : List[Any] ):
"""simple docstring"""
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
snake_case : Any = list(self.big_tokenizer.get_vocab().keys() )[:10]
snake_case : Union[str, Any] = ''' '''.join(UpperCAmelCase__ )
snake_case : Optional[int] = self.big_tokenizer.encode_plus(UpperCAmelCase__ , return_tensors='''pt''' )
snake_case : List[str] = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors='''pt''' )
snake_case : Optional[Any] = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
snake_case : Tuple = encoded_sequence['''input_ids'''].shape
snake_case : List[Any] = ReformerModel(UpperCAmelCase__ )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**UpperCAmelCase__ )
model(**UpperCAmelCase__ )
@slow
def lowerCAmelCase( self : Optional[int] ):
"""simple docstring"""
# fmt: off
snake_case : Tuple = {'''input_ids''': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
snake_case : Tuple = [
'''This is a very simple sentence.''',
'''The quick brown fox jumps over the lazy dog.''',
]
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase__ , model_name='''google/reformer-crime-and-punishment''' , revision='''0e6c3decb8211d49bf881013425dc8b0448b3f5a''' , padding=UpperCAmelCase__ , sequences=UpperCAmelCase__ , )
| 84 | 0 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_a : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
_a : Dict = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... 
prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def a_ ( height , width , scale_factor=8 ):
    """Round a pixel size up to the nearest latent-grid multiple.

    Ceil-divides each side by ``scale_factor**2`` and scales back by
    ``scale_factor``, so the result is the smallest multiple of
    ``scale_factor`` whose latent grid covers the requested size.

    Fixes: the obfuscated signature repeated ``__magic_name__`` three times
    (a SyntaxError — duplicate parameter names) while the body read
    ``height``/``width``/``scale_factor``; the two accumulators were bound to
    throwaway locals while ``new_height``/``new_width`` were read unbound;
    the ``-> str`` annotation was wrong for a tuple return.

    Args:
        height (int): target image height in pixels.
        width (int): target image width in pixels.
        scale_factor (int): VAE/movq spatial downscale factor. Defaults to 8.

    Returns:
        tuple[int, int]: ``(new_height, new_width)``, each a multiple of
        ``scale_factor``.
    """
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class a_ ( a ):
    """Kandinsky 2.2 ControlNet decoder pipeline: turns prior image
    embeddings plus a control ``hint`` into images via a conditioned UNet,
    a DDPM scheduler and the MoVQ VQ decoder.

    NOTE(review): throughout this class, assignment targets were obfuscated
    to the throwaway name `snake_case` while later lines still reference the
    intended names (e.g. `self.movq_scale_factor`, `device`, `batch_size`,
    `latents`, `noise_pred_uncond`, `variance_pred_text`). As written these
    lines raise NameError at runtime; restore the original targets upstream.
    """

    def __init__( self : Optional[int] , UpperCAmelCase__ : UNetaDConditionModel , UpperCAmelCase__ : DDPMScheduler , UpperCAmelCase__ : VQModel , ):
        # Register sub-models with the DiffusionPipeline machinery so that
        # save/load and device placement cover all three components.
        super().__init__()

        self.register_modules(
            unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ , movq=UpperCAmelCase__ , )
        # Spatial downscale factor of the MoVQ decoder; read later as
        # `self.movq_scale_factor` (see class NOTE — target name was lost).
        snake_case : List[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)

    def lowerCAmelCase( self : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any ):
        # Create (or validate a user-supplied) initial latent tensor and scale
        # it by the scheduler's initial noise sigma.
        if latents is None:
            snake_case : int = randn_tensor(UpperCAmelCase__ , generator=UpperCAmelCase__ , device=UpperCAmelCase__ , dtype=UpperCAmelCase__ )
        else:
            if latents.shape != shape:
                raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}" )
            snake_case : Optional[Any] = latents.to(UpperCAmelCase__ )

        snake_case : List[Any] = latents * scheduler.init_noise_sigma
        return latents

    def lowerCAmelCase( self : Dict , UpperCAmelCase__ : Optional[int]=0 ):
        # Sequential CPU offload: move unet and movq off-GPU, paging them back
        # on demand via accelerate's cpu_offload.
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('''Please install accelerate via `pip install accelerate`''' )

        snake_case : Union[str, Any] = torch.device(F"cuda:{gpu_id}" )

        snake_case : Dict = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(UpperCAmelCase__ , UpperCAmelCase__ )

    def lowerCAmelCase( self : List[Any] , UpperCAmelCase__ : Any=0 ):
        # Model-level CPU offload: keeps only one model on GPU at a time using
        # accelerate hooks (requires accelerate >= 0.17).
        if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )

        snake_case : Optional[int] = torch.device(F"cuda:{gpu_id}" )

        if self.device.type != "cpu":
            self.to('''cpu''' , silence_dtype_warnings=UpperCAmelCase__ )
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)

        snake_case : List[str] = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            snake_case : Optional[int] = cpu_offload_with_hook(UpperCAmelCase__ , UpperCAmelCase__ , prev_module_hook=UpperCAmelCase__ )

        # We'll offload the last model manually.
        snake_case : Tuple = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def lowerCAmelCase( self : Union[str, Any] ):
        # Device the pipeline actually executes on, honouring accelerate
        # offload hooks when present.
        if not hasattr(self.unet , '''_hf_hook''' ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(UpperCAmelCase__ , '''_hf_hook''' )
                and hasattr(module._hf_hook , '''execution_device''' )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device

    @torch.no_grad()
    @replace_example_docstring(UpperCAmelCase__ )
    def __call__( self : List[str] , UpperCAmelCase__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCAmelCase__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCAmelCase__ : torch.FloatTensor , UpperCAmelCase__ : int = 512 , UpperCAmelCase__ : int = 512 , UpperCAmelCase__ : int = 100 , UpperCAmelCase__ : float = 4.0 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase__ : Optional[torch.FloatTensor] = None , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , ):
        # Classifier-free-guided denoising loop driven by (negative) image
        # embeddings and the control hint; decodes final latents with MoVQ.
        snake_case : Optional[int] = self._execution_device

        snake_case : Union[str, Any] = guidance_scale > 1.0

        # Lists of embeddings / hints are batched into single tensors.
        if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
            snake_case : Any = torch.cat(UpperCAmelCase__ , dim=0 )
        if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
            snake_case : Union[str, Any] = torch.cat(UpperCAmelCase__ , dim=0 )
        if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
            snake_case : int = torch.cat(UpperCAmelCase__ , dim=0 )

        snake_case : List[Any] = image_embeds.shape[0] * num_images_per_prompt

        # For classifier-free guidance, stack [negative, positive] conditions
        # so one UNet pass scores both branches.
        if do_classifier_free_guidance:
            snake_case : Dict = image_embeds.repeat_interleave(UpperCAmelCase__ , dim=0 )
            snake_case : Optional[Any] = negative_image_embeds.repeat_interleave(UpperCAmelCase__ , dim=0 )
            snake_case : Tuple = hint.repeat_interleave(UpperCAmelCase__ , dim=0 )

            snake_case : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCAmelCase__ )
            snake_case : List[str] = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCAmelCase__ )
        self.scheduler.set_timesteps(UpperCAmelCase__ , device=UpperCAmelCase__ )
        snake_case : str = self.scheduler.timesteps

        snake_case : Optional[Any] = self.movq.config.latent_channels

        snake_case : Optional[Any] = downscale_height_and_width(UpperCAmelCase__ , UpperCAmelCase__ , self.movq_scale_factor )

        # create initial latent
        snake_case : Dict = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , self.scheduler , )

        for i, t in enumerate(self.progress_bar(UpperCAmelCase__ ) ):
            # expand the latents if we are doing classifier free guidance
            snake_case : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents

            snake_case : Optional[int] = {'''image_embeds''': image_embeds, '''hint''': hint}
            snake_case : Any = self.unet(
                sample=UpperCAmelCase__ , timestep=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , added_cond_kwargs=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , )[0]

            if do_classifier_free_guidance:
                # Split off the learned-variance channels, blend the two
                # guidance branches, then re-attach the text-branch variance.
                snake_case : Dict = noise_pred.split(latents.shape[1] , dim=1 )
                snake_case : Any = noise_pred.chunk(2 )
                snake_case : Dict = variance_pred.chunk(2 )
                snake_case : str = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                snake_case : List[str] = torch.cat([noise_pred, variance_pred_text] , dim=1 )

            if not (
                hasattr(self.scheduler.config , '''variance_type''' )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                snake_case : Tuple = noise_pred.split(latents.shape[1] , dim=1 )

            # compute the previous noisy sample x_t -> x_t-1
            snake_case : List[Any] = self.scheduler.step(
                UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , generator=UpperCAmelCase__ , )[0]
        # post-processing
        snake_case : List[Any] = self.movq.decode(UpperCAmelCase__ , force_not_quantize=UpperCAmelCase__ )['''sample''']

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )

        if output_type in ["np", "pil"]:
            # Map [-1, 1] -> [0, 1] and convert to channel-last numpy.
            snake_case : Optional[Any] = image * 0.5 + 0.5
            snake_case : int = image.clamp(0 , 1 )
            snake_case : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()

            if output_type == "pil":
                snake_case : str = self.numpy_to_pil(UpperCAmelCase__ )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=UpperCAmelCase__ )
| 721 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def a_ ( image ):
    """Convert a PIL image to a normalized torch tensor in [-1, 1].

    The image is resized so both sides are integer multiples of 32 (required
    by the UNet), converted to float32 in [0, 1], laid out as NCHW, and
    finally mapped to [-1, 1].

    Fixes: the obfuscated parameter ``__magic_name__`` was never read (the
    body uses ``image``); the resized width/height were bound to throwaway
    locals while ``w``/``h`` were read unbound; ``np.floataa`` is not a numpy
    attribute and is restored to ``np.float32``.

    Args:
        image (PIL.Image.Image): input RGB image.

    Returns:
        torch.FloatTensor: tensor of shape (1, 3, H, W), values in [-1, 1].
    """
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] )
    image = np.array(image ).astype(np.float32 ) / 255.0
    image = image[None].transpose(0 , 3 , 1 , 2 )
    image = torch.from_numpy(image )
    # Map [0, 1] -> [-1, 1].
    return 2.0 * image - 1.0
class a_ ( a ):
    """Latent upscale-style pipeline: a UNet denoises latents concatenated
    with a low-resolution conditioning image, and a VQ-VAE decodes the
    result back to pixel space.

    NOTE(review): many assignment targets below were obfuscated to
    `snake_case` while later lines reference the intended names
    (`batch_size`, `height`, `width`, `latents`, `accepts_eta`,
    `extra_kwargs`); as written those lines raise NameError — restore the
    original targets from upstream.
    """

    def __init__( self : Optional[Any] , UpperCAmelCase__ : VQModel , UpperCAmelCase__ : UNetaDModel , UpperCAmelCase__ : Union[
        DDIMScheduler,
        PNDMScheduler,
        LMSDiscreteScheduler,
        EulerDiscreteScheduler,
        EulerAncestralDiscreteScheduler,
        DPMSolverMultistepScheduler,
    ] , ):
        """Register the VQ-VAE, UNet and scheduler sub-modules."""
        super().__init__()
        self.register_modules(vqvae=UpperCAmelCase__ , unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ )

    @torch.no_grad()
    def __call__( self : Any , UpperCAmelCase__ : Union[torch.Tensor, PIL.Image.Image] = None , UpperCAmelCase__ : Optional[int] = 1 , UpperCAmelCase__ : Optional[int] = 100 , UpperCAmelCase__ : Optional[float] = 0.0 , UpperCAmelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , ):
        """Denoising loop conditioned on the low-resolution input image."""
        # Accept a single PIL image (batch of 1) or a batched tensor.
        if isinstance(UpperCAmelCase__ , PIL.Image.Image ):
            snake_case : Optional[int] = 1
        elif isinstance(UpperCAmelCase__ , torch.Tensor ):
            snake_case : Any = image.shape[0]
        else:
            raise ValueError(F"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(UpperCAmelCase__ )}" )

        if isinstance(UpperCAmelCase__ , PIL.Image.Image ):
            snake_case : Optional[Any] = preprocess(UpperCAmelCase__ )

        snake_case , snake_case : Union[str, Any] = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        snake_case : List[Any] = (batch_size, self.unet.config.in_channels // 2, height, width)
        snake_case : str = next(self.unet.parameters() ).dtype

        snake_case : Dict = randn_tensor(UpperCAmelCase__ , generator=UpperCAmelCase__ , device=self.device , dtype=UpperCAmelCase__ )

        snake_case : Any = image.to(device=self.device , dtype=UpperCAmelCase__ )

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(UpperCAmelCase__ , device=self.device )
        snake_case : Optional[Any] = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        snake_case : Union[str, Any] = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        snake_case : Any = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        snake_case : Optional[Any] = {}
        if accepts_eta:
            snake_case : Dict = eta

        for t in self.progress_bar(UpperCAmelCase__ ):
            # concat latents and low resolution image in the channel dimension.
            snake_case : Optional[int] = torch.cat([latents, image] , dim=1 )
            snake_case : str = self.scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ )

            # predict the noise residual
            snake_case : int = self.unet(UpperCAmelCase__ , UpperCAmelCase__ ).sample

            # compute the previous noisy sample x_t -> x_t-1
            snake_case : Any = self.scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample

        # decode the image latents with the VQVAE
        snake_case : Optional[int] = self.vqvae.decode(UpperCAmelCase__ ).sample
        snake_case : int = torch.clamp(UpperCAmelCase__ , -1.0 , 1.0 )
        # Map [-1, 1] -> [0, 1], then to channel-last numpy.
        snake_case : Dict = image / 2 + 0.5
        snake_case : int = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()

        if output_type == "pil":
            snake_case : Any = self.numpy_to_pil(UpperCAmelCase__ )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=UpperCAmelCase__ )
| 84 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available


# Lazy import structure: submodule name -> list of public symbols it exposes.
# Fixes: the obfuscated chunk assigned these structures to throwaway `_a`
# variables, so `_import_structure` (referenced by _LazyModule below) was
# never defined, the torch-gated model list was dropped instead of being
# registered, and the _LazyModule proxy was never installed into sys.modules.
_import_structure = {
    'configuration_ernie': ['ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ErnieConfig', 'ErnieOnnxConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: simply do not advertise the modeling symbols.
    pass
else:
    _import_structure['modeling_ernie'] = [
        'ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ErnieForCausalLM',
        'ErnieForMaskedLM',
        'ErnieForMultipleChoice',
        'ErnieForNextSentencePrediction',
        'ErnieForPreTraining',
        'ErnieForQuestionAnswering',
        'ErnieForSequenceClassification',
        'ErnieForTokenClassification',
        'ErnieModel',
        'ErniePreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports directly.
    from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ernie import (
            ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ErnieForCausalLM,
            ErnieForMaskedLM,
            ErnieForMultipleChoice,
            ErnieForNextSentencePrediction,
            ErnieForPreTraining,
            ErnieForQuestionAnswering,
            ErnieForSequenceClassification,
            ErnieForTokenClassification,
            ErnieModel,
            ErniePreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports
    # submodules on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 700 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class a_ ( a ):
    """Tests for RealmRetriever: block retrieval, answer-span search, and
    save/load round-trips over a tiny WordPiece vocab and dummy records.

    NOTE(review): several assignment targets in this class were obfuscated to
    `snake_case` while later lines reference the intended names
    (`tokenizer`, `vocab_tokens`, `retriever`, `concat_inputs`, ...) or pass
    the undefined `UpperCAmelCase__`; restore the original locals from
    upstream before running.
    """

    def lowerCAmelCase( self : List[Any] ):
        # setUp: temp dir with a tiny vocab file under realm_tokenizer/ and an
        # empty realm_block_records/ directory.
        snake_case : List[Any] = tempfile.mkdtemp()
        snake_case : Dict = 5

        # Realm tok
        snake_case : str = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''test''',
            '''question''',
            '''this''',
            '''is''',
            '''the''',
            '''first''',
            '''second''',
            '''third''',
            '''fourth''',
            '''fifth''',
            '''record''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        snake_case : Tuple = os.path.join(self.tmpdirname , '''realm_tokenizer''' )
        os.makedirs(UpperCAmelCase__ , exist_ok=UpperCAmelCase__ )
        snake_case : Any = os.path.join(UpperCAmelCase__ , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )

        snake_case : Tuple = os.path.join(self.tmpdirname , '''realm_block_records''' )
        os.makedirs(UpperCAmelCase__ , exist_ok=UpperCAmelCase__ )

    def lowerCAmelCase( self : List[Any] ):
        # Reload the tokenizer written by setUp.
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer''' ) )

    def lowerCAmelCase( self : Any ):
        # tearDown: remove the temp dir.
        shutil.rmtree(self.tmpdirname )

    def lowerCAmelCase( self : Optional[int] ):
        # Minimal RealmConfig sized to the dummy block records.
        snake_case : Any = RealmConfig(num_block_records=self.num_block_records )
        return config

    def lowerCAmelCase( self : int ):
        # Tiny two-row QA dataset fixture.
        snake_case : Optional[int] = Dataset.from_dict(
            {
                '''id''': ['''0''', '''1'''],
                '''question''': ['''foo''', '''bar'''],
                '''answers''': [['''Foo''', '''Bar'''], ['''Bar''']],
            } )
        return dataset

    def lowerCAmelCase( self : str ):
        # Five short evidence blocks plus one longer one, as raw bytes.
        snake_case : Dict = np.array(
            [
                b'''This is the first record''',
                b'''This is the second record''',
                b'''This is the third record''',
                b'''This is the fourth record''',
                b'''This is the fifth record''',
                b'''This is a longer longer longer record''',
            ] , dtype=UpperCAmelCase__ , )
        return block_records

    def lowerCAmelCase( self : Optional[Any] ):
        # Retriever over the dummy block records with the toy tokenizer.
        snake_case : Tuple = RealmRetriever(
            block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
        return retriever

    def lowerCAmelCase( self : Optional[Any] ):
        # Retrieve two blocks for one question and check the concatenated
        # question+evidence encodings (shapes and decoded token strings).
        snake_case : List[str] = self.get_config()
        snake_case : Optional[Any] = self.get_dummy_retriever()
        snake_case : Optional[int] = retriever.tokenizer

        snake_case : Dict = np.array([0, 3] , dtype='''long''' )
        snake_case : Optional[int] = tokenizer(['''Test question'''] ).input_ids
        snake_case : Union[str, Any] = tokenizer(
            ['''the fourth'''] , add_special_tokens=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , ).input_ids
        snake_case : Optional[Any] = config.reader_seq_len

        snake_case , snake_case , snake_case , snake_case : List[str] = retriever(
            UpperCAmelCase__ , UpperCAmelCase__ , answer_ids=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors='''np''' )

        self.assertEqual(len(UpperCAmelCase__ ) , 2 )
        self.assertEqual(len(UpperCAmelCase__ ) , 2 )
        self.assertEqual(len(UpperCAmelCase__ ) , 2 )
        self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
        self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
        self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
        self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , )

    def lowerCAmelCase( self : Optional[Any] ):
        # Answer-span search over three retrieved blocks: which blocks contain
        # the answer, plus its start/end token positions.
        snake_case : List[Any] = self.get_config()
        snake_case : Optional[int] = self.get_dummy_retriever()
        snake_case : List[str] = retriever.tokenizer

        snake_case : Optional[Any] = np.array([0, 3, 5] , dtype='''long''' )
        snake_case : Optional[int] = tokenizer(['''Test question'''] ).input_ids
        snake_case : Any = tokenizer(
            ['''the fourth''', '''longer longer'''] , add_special_tokens=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , ).input_ids
        snake_case : List[Any] = config.reader_seq_len

        snake_case , snake_case , snake_case , snake_case : Union[str, Any] = retriever(
            UpperCAmelCase__ , UpperCAmelCase__ , answer_ids=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors='''np''' )

        self.assertEqual([False, True, True] , UpperCAmelCase__ )
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , UpperCAmelCase__ )
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , UpperCAmelCase__ )

    def lowerCAmelCase( self : Optional[int] ):
        # Save/load round-trip: local path, then a mocked hub download.
        snake_case : int = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )

        # Test local path
        snake_case : Optional[Any] = retriever.from_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
        self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )

        # Test mocked remote path
        with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''' ) as mock_hf_hub_download:
            snake_case : Any = os.path.join(
                os.path.join(self.tmpdirname , '''realm_block_records''' ) , _REALM_BLOCK_RECORDS_FILENAME )
            snake_case : Any = RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''' )

        self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
| 84 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
_a : int = logging.get_logger(__name__)

# Map from pretrained model identifier to its hosted config URL.
_a : str = {
    'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class a_ ( PretrainedConfig ):
    """Configuration class for a SEW-D model.

    Fixes applied: the original declared every ``__init__`` parameter with the
    same name (a SyntaxError), assigned the values to throwaway locals instead
    of ``self`` attributes, and inherited from an undefined name ``a`` — the
    imported ``PretrainedConfig`` is the intended base.
    """

    A__ : str = 'sew-d'

    def __init__( self : Tuple , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , squeeze_factor=2 , max_position_embeddings=512 , position_buckets=256 , share_att_key=True , relative_attention=True , pos_att_type=("p2c", "c2p") , norm_rel_ebd="layer_norm" , hidden_act="gelu_python" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , final_dropout=0.1 , initializer_range=0.02 , layer_norm_eps=1e-7 , feature_layer_norm_eps=1e-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , conv_bias=False , num_conv_pos_embeddings=128 , num_conv_pos_embedding_groups=16 , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , **kwargs , ):
        """Store the model hyper-parameters; defaults match the scrambled original."""
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        # Number of conv feature-extraction layers is derived from conv_dim.
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type )
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        # The three conv tuples must describe the same number of layers.
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect.'''
                '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
                F"but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"
                F"= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`." )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def lowerCAmelCase( self : Union[str, Any] ):
        """Overall downsampling factor of the conv feature extractor (product of strides)."""
        return functools.reduce(operator.mul , self.conv_stride , 1 )
| 701 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


# Import structure handed to _LazyModule at the bottom of this file; entries
# for optional backends (torch / tf / flax) are appended below when available.
# (The original bound these lists to throwaway names and then passed the
# undefined ``_import_structure`` to _LazyModule — a NameError on import.)
_import_structure = {'configuration_encoder_decoder': ['EncoderDecoderConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_encoder_decoder'] = ['EncoderDecoderModel']

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_encoder_decoder'] = ['TFEncoderDecoderModel']

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_flax_encoder_decoder'] = ['FlaxEncoderDecoderModel']

if TYPE_CHECKING:
    from .configuration_encoder_decoder import EncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encoder_decoder import EncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_encoder_decoder import TFEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
    import sys

    # Replace this module with a lazy proxy so the heavy backend modules are
    # only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 84 | 0 |
from maths.prime_check import is_prime


def a_ ( number: int ) -> int:
    """Return the twin prime of *number*.

    The twin prime is ``number + 2`` when both ``number`` and ``number + 2``
    are prime; otherwise -1 is returned.

    Raises:
        TypeError: if *number* is not an ``int``.
    """
    # The original checked ``isinstance(x, x)`` (a TypeError at runtime) and
    # formatted an undefined name ``number`` into the message.
    if not isinstance(number , int ):
        msg = F"Input value of [number={number}] must be an integer"
        raise TypeError(msg )
    if is_prime(number ) and is_prime(number + 2 ):
        return number + 2
    return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 702 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
_a : str = logging.get_logger(__name__)
# Tokenizer resource tables consumed by the class attributes below.
# The original bound all four dicts to the same throwaway name ``_a`` (each
# assignment clobbering the previous) while the class reads the names used here.
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'yjernite/retribert-base-uncased': (
            'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
        ),
    },
    'tokenizer_file': {
        'yjernite/retribert-base-uncased': (
            'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'yjernite/retribert-base-uncased': 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    'yjernite/retribert-base-uncased': {'do_lower_case': True},
}
class a_ ( PreTrainedTokenizerFast ):
    """Fast RetriBERT tokenizer (WordPiece, backed by HuggingFace *tokenizers*).

    Fixes applied: every method declared duplicate ``UpperCAmelCase__``
    parameters (a SyntaxError), ``__init__`` read names it never bound
    (``normalizer_state``, ``normalizer_class``), and the base class was the
    undefined name ``a`` — the imported ``PreTrainedTokenizerFast`` is the
    intended base.

    NOTE(review): the class attributes below are all bound to ``A__`` so each
    assignment clobbers the previous one; the conventional names would be
    ``vocab_files_names``, ``pretrained_vocab_files_map`` etc. — confirm.
    """

    A__ : List[str] = VOCAB_FILES_NAMES
    A__ : Any = PRETRAINED_VOCAB_FILES_MAP
    A__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    A__ : Any = PRETRAINED_INIT_CONFIGURATION
    A__ : Optional[Any] = RetriBertTokenizer
    A__ : Any = ['input_ids', 'attention_mask']

    def __init__( self : Optional[int] , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        """Build the fast tokenizer and re-sync the backend normalizer options."""
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        # Rebuild the backend normalizer if any option differs from what was saved.
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )

        self.do_lower_case = do_lower_case

    def lowerCAmelCase( self : Dict , token_ids_a , token_ids_b=None ):
        """Build model inputs as ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output

    def lowerCAmelCase( self : Optional[Any] , token_ids_a: List[int] , token_ids_b: Optional[List[int]] = None ):
        """Token-type ids: 0 for the first sequence (and specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]

    def lowerCAmelCase( self : str , save_directory: str , filename_prefix: Optional[str] = None ):
        """Save the backend vocabulary files and return their paths."""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 84 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class a_ ( unittest.TestCase ):
    """Tests for the transformers activation registry.

    Fixes applied: each body referenced names it never bound (the originals
    assigned to throwaway ``snake_case`` locals and then read
    ``torch_builtin``/``y_gelu``/``acta``/``UpperCAmelCase__``).

    NOTE(review): all four test methods share the name ``lowerCAmelCase``, so
    only the last definition survives on the class — confirm intended names.
    """

    def lowerCAmelCase( self : Optional[Any] ):
        """gelu_python matches torch's builtin gelu but differs from gelu_new."""
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
        torch_builtin = get_activation('''gelu''' )
        self.assertTrue(torch.allclose(gelu_python(x ) , torch_builtin(x ) ) )
        self.assertFalse(torch.allclose(gelu_python(x ) , gelu_new(x ) ) )

    def lowerCAmelCase( self : Any ):
        """gelu_10 clips at 10.0 and agrees with gelu below the clip point."""
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
        torch_builtin = get_activation('''gelu''' )
        geluaa = get_activation('''gelu_10''' )
        y_gelu = torch_builtin(x )
        y_gelu_aa = geluaa(x )
        clipped_mask = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
        self.assertTrue(torch.max(y_gelu_aa ).item() == 10.0 )
        self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )

    def lowerCAmelCase( self : int ):
        """Every registered activation resolves; unknown keys raise KeyError."""
        get_activation('''gelu''' )
        get_activation('''gelu_10''' )
        get_activation('''gelu_fast''' )
        get_activation('''gelu_new''' )
        get_activation('''gelu_python''' )
        get_activation('''gelu_pytorch_tanh''' )
        get_activation('''linear''' )
        get_activation('''mish''' )
        get_activation('''quick_gelu''' )
        get_activation('''relu''' )
        get_activation('''sigmoid''' )
        get_activation('''silu''' )
        get_activation('''swish''' )
        get_activation('''tanh''' )
        # Unknown / non-string keys must fail loudly.
        with self.assertRaises(KeyError ):
            get_activation('''bogus''' )
        with self.assertRaises(KeyError ):
            get_activation(None )

    def lowerCAmelCase( self : int ):
        """get_activation must return distinct objects on each call."""
        acta = get_activation('''gelu''' )
        acta.a = 1
        acta_a = get_activation('''gelu''' )
        self.assertEqual(acta.a , 1 )
        # The second instance must not see the attribute set on the first.
        with self.assertRaises(AttributeError ):
            _ = acta_a.a
| 703 |
import string
import numpy
def a_ ( a: int , b: int ) -> int:
    """Return the greatest common divisor of *a* and *b* (Euclid's algorithm).

    The original declared both parameters with the same name (a SyntaxError)
    and recursed through a module-level name that other definitions in this
    file rebind, so this version is iterative and self-contained.
    """
    while a != 0:
        a, b = b % a, a
    return b
class a_ :
    """Hill cipher over the 36-character alphabet ``A``-``Z`` + ``0``-``9``.

    Encrypts/decrypts fixed-size batches of characters by multiplying their
    indices with an integer key matrix modulo 36. The original bound the class
    attributes and methods to scrambled names (``A__``/``lowerCAmelCase``)
    while its own bodies read ``key_string``/``modulus``/``replace_letters``
    etc., so the referenced names are restored here.
    """

    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters
    key_string = string.ascii_uppercase + string.digits
    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36 )
    to_int = numpy.vectorize(round )

    def __init__( self : List[str] , encrypt_key: numpy.ndarray ):
        """Store the NxN key (mod 36) after validating its determinant."""
        self.encrypt_key = self.modulus(encrypt_key )  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters( self : Union[str, Any] , letter: str ):
        """Map a character to its index in the cipher alphabet."""
        return self.key_string.index(letter )

    def replace_digits( self : List[str] , num: int ):
        """Map a (possibly float) index back to its alphabet character."""
        return self.key_string[round(num )]

    def check_determinant( self : Optional[Any] ):
        """Raise ValueError unless gcd(det(key) mod 36, 36) == 1 (key invertible)."""
        det = round(numpy.linalg.det(self.encrypt_key ) )
        if det < 0:
            det = det % len(self.key_string )
        req_l = len(self.key_string )
        # Inline Euclid gcd: module-level helper names are unreliable in this file.
        x, y = det, req_l
        while x:
            x, y = y % x, x
        if y != 1:
            msg = (
                F"determinant modular {req_l} of encryption key({det}) "
                F"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg )

    def process_text( self : Optional[int] , text: str ):
        """Keep only alphabet characters and pad to a multiple of break_key."""
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars ) % self.break_key != 0:
            chars.append(last )
        return "".join(chars )

    def encrypt( self : Optional[int] , text: str ):
        """Encrypt *text* batch-by-batch with the key matrix (mod 36)."""
        text = self.process_text(text.upper() )
        encrypted = ''''''
        for i in range(0 , len(text ) - self.break_key + 1 , self.break_key ):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char ) for char in batch]
            batch_vec = numpy.array([vec] ).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec ) ).T.tolist()[
                0
            ]
            encrypted_batch = ''''''.join(
                self.replace_digits(num ) for num in batch_encrypted )
            encrypted += encrypted_batch
        return encrypted

    def make_decrypt_key( self : str ):
        """Return the inverse of the key matrix modulo 36."""
        det = round(numpy.linalg.det(self.encrypt_key ) )
        if det < 0:
            det = det % len(self.key_string )
        det_inv = None
        # Brute-force the modular inverse of the determinant.
        for i in range(len(self.key_string ) ):
            if (det * i) % len(self.key_string ) == 1:
                det_inv = i
                break
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key )
            * numpy.linalg.inv(self.encrypt_key )
        )
        return self.to_int(self.modulus(inv_key ) )

    def decrypt( self : Dict , text: str ):
        """Decrypt *text* batch-by-batch with the inverse key (mod 36)."""
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper() )
        decrypted = ''''''
        for i in range(0 , len(text ) - self.break_key + 1 , self.break_key ):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char ) for char in batch]
            batch_vec = numpy.array([vec] ).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec ) ).T.tolist()[0]
            decrypted_batch = ''''''.join(
                self.replace_digits(num ) for num in batch_decrypted )
            decrypted += decrypted_batch
        return decrypted
def a_ ( ) -> None:
    """Interactive driver: read a key matrix, then encrypt or decrypt text.

    Fixes applied: the loop bound and the matrix rows referenced names that
    were never defined (``__magic_name__`` on a zero-argument function).
    """
    n = int(input('''Enter the order of the encryption key: ''' ) )
    hill_matrix = []

    print('''Enter each row of the encryption key with space separated integers''' )
    for _ in range(n ):
        row = [int(x ) for x in input().split()]
        hill_matrix.append(row )

    # NOTE(review): no ``HillCipher`` is defined in this file — the cipher
    # class above is named ``a_`` (shadowed by this function); confirm binding.
    hc = HillCipher(numpy.array(hill_matrix ) )

    print('''Would you like to encrypt or decrypt some text? (1 or 2)''' )
    option = input('''\n1. Encrypt\n2. Decrypt\n''' )
    if option == "1":
        text_e = input('''What text would you like to encrypt?: ''' )
        print('''Your encrypted text is:''' )
        print(hc.encrypt(text_e ) )
    elif option == "2":
        text_d = input('''What text would you like to decrypt?: ''' )
        print('''Your decrypted text is:''' )
        print(hc.decrypt(text_d ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Was ``main()``, which does not exist in this file; call the driver above.
    a_()
| 84 | 0 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
_a : List[Any] = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def a_ ( __magic_name__ ) -> List[Any]:
    """Forward pytest's option registration to the shared diffusers helper."""
    from diffusers.utils.testing_utils import pytest_addoption_shared

    register_options = pytest_addoption_shared
    register_options(__magic_name__ )
def a_ ( __magic_name__ ) -> Tuple:
    """Emit the optional test reports at the end of the pytest session.

    ``__magic_name__`` is pytest's terminal reporter. The original read the
    undefined names ``terminalreporter``/``make_reports`` instead of the
    parameter and its own local.
    """
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = __magic_name__.config.getoption('''--make-reports''' )
    if make_reports:
        pytest_terminal_summary_main(__magic_name__ , id=make_reports )
| 704 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class a_ ( a ):
A__ : List[Any] = 'Salesforce/blip-image-captioning-base'
A__ : Dict = (
'This is a tool that generates a description of an image. It takes an input named `image` which should be the '
'image to caption, and returns a text that contains the description in English.'
)
A__ : str = 'image_captioner'
A__ : Dict = AutoModelForVisionaSeq
A__ : Optional[Any] = ['image']
A__ : List[str] = ['text']
def __init__( self : List[str] , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : Union[str, Any] ):
"""simple docstring"""
requires_backends(self , ['''vision'''] )
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
def lowerCAmelCase( self : Optional[int] , UpperCAmelCase__ : "Image" ):
"""simple docstring"""
return self.pre_processor(images=UpperCAmelCase__ , return_tensors='''pt''' )
def lowerCAmelCase( self : Any , UpperCAmelCase__ : Union[str, Any] ):
"""simple docstring"""
return self.model.generate(**UpperCAmelCase__ )
def lowerCAmelCase( self : Optional[Any] , UpperCAmelCase__ : List[Any] ):
"""simple docstring"""
return self.pre_processor.batch_decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ )[0].strip()
| 84 | 0 |
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
_a : Dict = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
_a : Dict = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def a_ ( cells: list[list[int]] ) -> list[list[int]]:
    """Return the next Game-of-Life generation of the grid *cells*.

    *cells* is a rectangular grid of 0 (dead) / 1 (alive) ints; a new grid is
    returned and the input is not modified. (The original's body read the
    undefined name ``cells`` while the parameter was ``__magic_name__``.)
    """
    next_generation = []
    for i in range(len(cells ) ):
        next_generation_row = []
        for j in range(len(cells[i] ) ):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i] ) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i] ) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells ) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells ) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells ) - 1 and j < len(cells[i] ) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1 )
            else:
                next_generation_row.append(0 )

        next_generation.append(next_generation_row )
    return next_generation
def a_ ( cells: list[list[int]] , frames: int ) -> list[Image.Image]:
    """Render *frames* generations of the grid *cells* as greyscale images.

    The original declared both parameters with the same name (a SyntaxError)
    and read the undefined name ``cells``.
    """
    images = []
    for _ in range(frames ):
        # Create output image
        img = Image.new('''RGB''' , (len(cells[0] ), len(cells )) )
        pixels = img.load()

        # Save cells to image: live cells are black (0), dead cells white (255).
        for x in range(len(cells ) ):
            for y in range(len(cells[0] ) ):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img )
        # NOTE(review): ``new_generation`` is not defined under that name in
        # this file (the step function above is ``a_``) — confirm the binding.
        cells = new_generation(cells )
    return images
if __name__ == "__main__":
    # The original bound the frames to ``_a`` but consumed them as ``images``.
    # NOTE(review): ``generate_images`` is not defined under that name in this
    # file (the frame generator above is ``a_``) — confirm the binding.
    images = generate_images(GLIDER, 16)
    images[0].save('out.gif', save_all=True, append_images=images[1:])
| 705 |
def a_ ( p: int ) -> bool:
    """Lucas-Lehmer primality test for the Mersenne number ``2**p - 1``.

    Returns True iff ``2**p - 1`` is prime. Only valid for prime *p*; the
    original's body read the undefined name ``p`` while the parameter was
    ``__magic_name__``.

    Raises:
        ValueError: if ``p < 2``.
    """
    if p < 2:
        raise ValueError('''p should not be less than 2!''' )
    elif p == 2:
        # 2**2 - 1 == 3 is prime; the iteration below needs p > 2.
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2 ):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
    # Was ``lucas_lehmer_test(...)`` — no such name exists in this file; the
    # test implemented above is ``a_``.
    print(a_(7))
    print(a_(11))
| 84 | 0 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
# Sample inputs consumed by the demo in the ``__main__`` block below.
# (The original bound both tuples to ``_a``, clobbering the first.)
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class a_ :
    """A single linked-list node: a value and a reference to the next node.

    The original declared both fields as ``A__`` (the second annotation
    clobbered the first), while the list class below reads ``node.data`` and
    ``node.next_node``.
    """

    data : int
    next_node : Node | None
class a_ :
    """Singly linked list that yields its integer items in ascending order."""

    def __init__( self : int , UpperCAmelCase__ : Iterable[int] ):
        """Build the list from any iterable of ints.

        Fixes applied: the original bound the head to a throwaway local (so
        ``self.head`` was never set) and passed the input iterable itself as
        ``sorted``'s ``reverse=`` flag instead of ``True``.
        """
        self.head : Node | None = None
        # Insert largest first; each push prepends, leaving the smallest at the head.
        for i in sorted(UpperCAmelCase__ , reverse=True ):
            # NOTE(review): ``Node`` is not defined under that name in this
            # file (the node dataclass above is ``a_``) — confirm the binding.
            self.head = Node(i , self.head )

    def __iter__( self : Optional[Any] ):
        """Yield the stored values in ascending order."""
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__( self : str ):
        """Number of stored values (walks the whole list)."""
        return sum(1 for _ in self )

    def __str__( self : Optional[int] ):
        """Render as ``"a -> b -> c"``."""
        return " -> ".join([str(node ) for node in self] )
def a_ ( sll_one , sll_two ) -> SortedLinkedList:
    """Merge two sorted linked lists into a new sorted linked list.

    The original declared both parameters with the same name (a SyntaxError).
    NOTE(review): ``SortedLinkedList`` is not defined under that name in this
    file (the list class above is ``a_``) — confirm the intended binding.
    """
    return SortedLinkedList(list(sll_one ) + list(sll_two ) )
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # The alias was bound to ``_a`` but consumed as ``SSL`` on the next line.
    # NOTE(review): ``SortedLinkedList`` / ``merge_lists`` are not defined
    # under those names in this file — confirm the intended bindings.
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
from sklearn.metrics import fa_score
import datasets
_a : List[str] = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'
_a : Dict = '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. 
This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, 
references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n'
_a : List[Any] = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
    """F1 metric wrapping scikit-learn's ``f1_score``.

    Fixes applied: ``_compute`` declared every parameter with the same name
    (a SyntaxError) and passed the same undefined name for every argument of
    ``fa_score``; arguments follow sklearn's ``(y_true, y_pred, ...)`` order.
    """

    def lowerCAmelCase( self : Any ):
        """Metric metadata; the feature schema depends on the multilabel config."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
                    '''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
                }
                if self.config_name == '''multilabel'''
                else {
                    '''predictions''': datasets.Value('''int32''' ),
                    '''references''': datasets.Value('''int32''' ),
                } ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )

    def lowerCAmelCase( self : List[Any] , predictions , references , labels=None , pos_label=1 , average="binary" , sample_weight=None ):
        """Compute F1; returns a float for a single score, else the per-label array."""
        score = fa_score(
            references , predictions , labels=labels , pos_label=pos_label , average=average , sample_weight=sample_weight )
        return {"f1": float(score ) if score.size == 1 else score}
| 84 | 0 |
import torch
from diffusers import DiffusionPipeline
class a_ ( DiffusionPipeline ):
    """Minimal custom pipeline: one denoising step, then a constant output.

    Fixes applied: the base class was the undefined name ``a`` (the imported
    ``DiffusionPipeline`` is the intended base), ``__init__`` declared both
    parameters with the same name (a SyntaxError), and ``__call__`` read
    names it never bound.
    """

    def __init__( self : Optional[Any] , unet , scheduler ):
        """Register the unet and scheduler so save/load round-trips them."""
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )

    def __call__( self : Optional[int] ):
        """Run one unet + scheduler step and return a tensor of ones."""
        sample = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
        timestep = 1
        unet_output = self.unet(sample , timestep ).sample
        scheduler_output = self.scheduler.step(unet_output , timestep , sample ).prev_sample
        # x - x + 1 == 1: the result is deterministic despite the random sample.
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output )
        return result
| 707 |
def a_ ( number: int ) -> int:
    """Return the largest value obtainable by deleting exactly one digit.

    Works on the absolute value of *number* (the sign is discarded).
    NOTE(review): a single-digit input leaves an empty string and would raise
    ValueError on ``int('')`` — same as the original; confirm intended domain.

    Raises:
        TypeError: if *number* is not an ``int``.
    """
    if not isinstance(number , int ):
        raise TypeError('''only integers accepted as input''' )
    num_str = str(abs(number ) )
    # One working copy of the digit list per digit position.
    num_transpositions = [list(num_str ) for _ in range(len(num_str ) )]
    for index in range(len(num_str ) ):
        # Delete the digit at this position from its own copy.
        num_transpositions[index].pop(index )
    return max(
        int(''''''.join(list(transposition ) ) ) for transposition in num_transpositions )


if __name__ == "__main__":
    __import__('doctest').testmod()
| 84 | 0 |
import math
def a_ ( num: int ) -> str:
    """Convert a non-negative decimal integer to its octal string (``0o...``).

    The original's body read the undefined names ``num``/``octal``/``counter``
    while its parameter was ``__magic_name__``.
    """
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        # Place this octal digit at the next decimal position.
        octal = octal + (remainder * math.floor(math.pow(10 , counter ) ) )
        counter += 1
        num = math.floor(num / 8 )  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return F"0o{int(octal )}"
def a_ ( ) -> None:
    """Print a few decimal-to-octal conversions as a smoke-test demo.

    NOTE(review): ``decimal_to_octal`` is not defined under that name in this
    file (the converter above is ``a_``, shadowed by this function) — confirm
    the intended binding.
    """
    print('''\n2 in octal is:''' )
    print(decimal_to_octal(2 ) )  # = 2
    print('''\n8 in octal is:''' )
    print(decimal_to_octal(8 ) )  # = 10
    print('''\n65 in octal is:''' )
    print(decimal_to_octal(65 ) )  # = 101
    print('''\n216 in octal is:''' )
    print(decimal_to_octal(216 ) )  # = 330
    print('''\n512 in octal is:''' )
    print(decimal_to_octal(512 ) )  # = 1000
    print('''\n''' )


if __name__ == "__main__":
    # Was ``main()``, which is not defined in this file; call the demo above.
    a_()
| 708 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class a_ :
    """Builds a tiny UMT5 configuration and random input batches for the
    model tests below.

    NOTE(review): identifiers in this class were mechanically mangled —
    every method parameter after ``self`` is named ``UpperCAmelCase__``
    (duplicate argument names are a SyntaxError, so this cannot import as
    written), ``__init__`` reads names like ``parent``/``batch_size`` that
    are no longer bound, every local/attribute assignment targets
    ``snake_case``, and all helper methods share the name ``lowerCAmelCase``
    (later definitions shadow earlier ones).  The docstrings below describe
    the intended behaviour; the original names must be restored to run.
    """

    def __init__( self : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any]=99 , UpperCAmelCase__ : Dict=13 , UpperCAmelCase__ : int=7 , UpperCAmelCase__ : Any=9 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Tuple=False , UpperCAmelCase__ : Tuple=32 , UpperCAmelCase__ : Dict=5 , UpperCAmelCase__ : Optional[int]=4 , UpperCAmelCase__ : List[Any]=37 , UpperCAmelCase__ : Union[str, Any]=8 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : str=0.002 , UpperCAmelCase__ : str=1 , UpperCAmelCase__ : Any=0 , UpperCAmelCase__ : Union[str, Any]=0 , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Optional[Any]=None , ):
        """Record the hyper-parameters used to build tiny configs/inputs.

        The defaults suggest the intended parameters: vocab_size=99,
        batch_size=13, encoder_seq_length=7, decoder_seq_length=9, … —
        TODO confirm against the upstream test file.
        """
        snake_case : Union[str, Any] = parent
        snake_case : Union[str, Any] = batch_size
        snake_case : Any = encoder_seq_length
        snake_case : str = decoder_seq_length
        # For common tests
        snake_case : Optional[int] = self.decoder_seq_length
        snake_case : Optional[Any] = is_training
        snake_case : List[Any] = use_attention_mask
        snake_case : Union[str, Any] = use_labels
        snake_case : Any = vocab_size
        snake_case : Optional[int] = hidden_size
        snake_case : List[str] = num_hidden_layers
        snake_case : Union[str, Any] = num_attention_heads
        snake_case : Any = d_ff
        snake_case : Any = relative_attention_num_buckets
        snake_case : Optional[Any] = dropout_rate
        snake_case : int = initializer_factor
        snake_case : Optional[Any] = eos_token_id
        snake_case : Dict = pad_token_id
        snake_case : Optional[Any] = decoder_start_token_id
        snake_case : Union[str, Any] = None
        snake_case : List[str] = decoder_layers

    def lowerCAmelCase( self : Union[str, Any] ):
        """Return the real google/umt5-base config (requires hub access)."""
        return TaConfig.from_pretrained('''google/umt5-base''' )

    def lowerCAmelCase( self : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : List[Any]=None , ):
        """Fill in default attention/head masks for any not provided and
        return the full kwargs dict for a model forward call."""
        if attention_mask is None:
            snake_case : Union[str, Any] = input_ids.ne(config.pad_token_id )
        if decoder_attention_mask is None:
            snake_case : Any = decoder_input_ids.ne(config.pad_token_id )
        if head_mask is None:
            snake_case : List[Any] = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=UpperCAmelCase__ )
        if decoder_head_mask is None:
            snake_case : Tuple = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=UpperCAmelCase__ )
        if cross_attn_head_mask is None:
            snake_case : Union[str, Any] = torch.ones(
                config.num_decoder_layers , config.num_attention_heads , device=UpperCAmelCase__ )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }

    def lowerCAmelCase( self : int ):
        """Build a tiny config plus random (clamped) encoder/decoder ids."""
        snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
        snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        snake_case : List[str] = input_ids.clamp(self.pad_token_id + 1 )
        snake_case : List[str] = decoder_input_ids.clamp(self.pad_token_id + 1 )
        snake_case : str = self.get_config()
        snake_case : Tuple = config.num_attention_heads
        snake_case : List[Any] = self.prepare_inputs_dict(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
        return config, input_dict

    def lowerCAmelCase( self : Dict ):
        """Alias of prepare_config_and_inputs used by the common mixins."""
        snake_case , snake_case : List[str] = self.prepare_config_and_inputs()
        return config, inputs_dict

    def lowerCAmelCase( self : Dict ):
        """Tiny T5/UMT5 config with a hard-coded vocab_size of 166."""
        return TaConfig(
            vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )

    def lowerCAmelCase( self : Tuple ):
        """Tiny T5/UMT5 config parameterized by this tester's vocab_size."""
        return TaConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )

    def lowerCAmelCase( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Tuple , ):
        """Forward the full encoder-decoder model and check output shapes
        and past-key-value structure."""
        snake_case : str = UMTaModel(config=UpperCAmelCase__ )
        model.to(UpperCAmelCase__ )
        model.eval()
        snake_case : str = model(
            input_ids=UpperCAmelCase__ , decoder_input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , )
        snake_case : int = model(input_ids=UpperCAmelCase__ , decoder_input_ids=UpperCAmelCase__ )
        snake_case : int = result.last_hidden_state
        snake_case : Dict = result.past_key_values
        snake_case : Dict = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
        self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(UpperCAmelCase__ ) , config.num_layers )
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0] ) , 4 )

    def lowerCAmelCase( self : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str] , ):
        """Check that decoding with cached past_key_values matches a full
        forward pass on the concatenated sequence."""
        snake_case : int = UMTaModel(config=UpperCAmelCase__ ).get_decoder().to(UpperCAmelCase__ ).eval()
        # first forward pass
        snake_case : List[Any] = model(UpperCAmelCase__ , use_cache=UpperCAmelCase__ )
        snake_case : List[Any] = model(UpperCAmelCase__ )
        snake_case : Any = model(UpperCAmelCase__ , use_cache=UpperCAmelCase__ )
        self.parent.assertTrue(len(UpperCAmelCase__ ) == len(UpperCAmelCase__ ) )
        self.parent.assertTrue(len(UpperCAmelCase__ ) == len(UpperCAmelCase__ ) + 1 )
        snake_case , snake_case : List[str] = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        snake_case : Any = ids_tensor((self.batch_size, 1) , config.vocab_size )
        # append to next input_ids and
        snake_case : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
        snake_case : Any = model(UpperCAmelCase__ )['''last_hidden_state''']
        snake_case : Optional[Any] = model(UpperCAmelCase__ , past_key_values=UpperCAmelCase__ )['''last_hidden_state''']
        # select random slice
        snake_case : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        snake_case : Union[str, Any] = output_from_no_past[:, -1, random_slice_idx].detach()
        snake_case : Tuple = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-3 ) )

    def lowerCAmelCase( self : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , ):
        """Run a half-precision forward pass and check for NaNs."""
        snake_case : int = UMTaModel(config=UpperCAmelCase__ ).to(UpperCAmelCase__ ).half().eval()
        snake_case : str = model(**UpperCAmelCase__ )['''last_hidden_state''']
        self.parent.assertFalse(torch.isnan(UpperCAmelCase__ ).any().item() )
@require_torch
class a_ ( a , a , a , unittest.TestCase ):
    """UMT5 test-suite wiring for the common model/generation/pipeline mixins.

    NOTE(review): identifiers were mechanically mangled — the three mixin
    bases appear as ``a`` (presumably ModelTesterMixin,
    GenerationTesterMixin, PipelineTesterMixin given the imports above —
    confirm), every class attribute is assigned to ``A__`` (each assignment
    overwrites the previous; the originals are ``all_model_classes``,
    ``all_generative_model_classes``, ``pipeline_model_mapping``, various
    ``test_*`` flags and ``model_split_percents``), methods share the name
    ``lowerCAmelCase``, and the repeated parameter name ``UpperCAmelCase__``
    is a SyntaxError.  Restore the original names before running.
    """

    A__ : str = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    A__ : str = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    A__ : Any = (
        {
            'conversational': UMTaForConditionalGeneration,
            'feature-extraction': UMTaModel,
            'summarization': UMTaForConditionalGeneration,
            'text2text-generation': UMTaForConditionalGeneration,
            'translation': UMTaForConditionalGeneration,
            'question-answering': UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    A__ : Dict = True
    A__ : List[str] = False
    A__ : Optional[int] = False
    A__ : Optional[int] = True
    A__ : List[str] = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    A__ : int = [0.8, 0.9]

    def lowerCAmelCase( self : Optional[Any] ):
        """Create the shared model tester instance."""
        snake_case : Union[str, Any] = UMTaModelTester(self )

    @unittest.skip('''Test has a segmentation fault on torch 1.8.0''' )
    def lowerCAmelCase( self : Optional[int] ):
        """Export the model to ONNX (skipped: segfaults on torch 1.8.0)."""
        snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
        snake_case : Optional[Any] = UMTaModel(config_and_inputs[0] ).to(UpperCAmelCase__ )
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                UpperCAmelCase__ , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"{tmpdirname}/t5_test.onnx" , export_params=UpperCAmelCase__ , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , )

    @unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
    def lowerCAmelCase( self : List[Any] ):
        """Run the fp16 forward check (GPU only)."""
        snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fpaa_forward(*UpperCAmelCase__ )

    def lowerCAmelCase( self : Tuple ):
        """Check that fully zeroed head masks produce all-zero attention
        weights during generation."""
        snake_case : Optional[int] = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions''']
        snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
        snake_case : int = config_and_inputs[0]
        snake_case : Union[str, Any] = UMTaForConditionalGeneration(UpperCAmelCase__ ).eval()
        model.to(UpperCAmelCase__ )
        snake_case : str = {
            '''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=UpperCAmelCase__ ),
            '''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCAmelCase__ ),
            '''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCAmelCase__ ),
        }
        for attn_name, (name, mask) in zip(UpperCAmelCase__ , head_masking.items() ):
            snake_case : int = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                snake_case : List[str] = torch.ones(
                    config.num_decoder_layers , config.num_heads , device=UpperCAmelCase__ )
            snake_case : Union[str, Any] = model.generate(
                config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=UpperCAmelCase__ , return_dict_in_generate=UpperCAmelCase__ , **UpperCAmelCase__ , )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            snake_case : List[str] = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )

    @unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' )
    def lowerCAmelCase( self : Any ):
        """Disabled: unstable on the tiny test model."""
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class a_ ( unittest.TestCase ):
    """Slow integration test for google/umt5-small (currently skipped
    pending a tokenizer special-token stripping change upstream).

    NOTE(review): method/parameter names were mangled (``lowerCAmelCase`` /
    ``UpperCAmelCase__`` / ``snake_case``); restore the originals before
    enabling the test.
    """

    @slow
    @unittest.skip(
        '''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' )
    def lowerCAmelCase( self : int ):
        """Compare tokenized ids and generation output against golden values."""
        snake_case : Optional[Any] = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=UpperCAmelCase__ ).to(UpperCAmelCase__ )
        snake_case : int = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=UpperCAmelCase__ , legacy=UpperCAmelCase__ )
        snake_case : List[str] = [
            '''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
            '''No se como puedo <extra_id_0>.''',
            '''This is the reason why we <extra_id_0> them.''',
            '''The <extra_id_0> walks in <extra_id_1>, seats''',
            '''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
        ]
        snake_case : Dict = tokenizer(UpperCAmelCase__ , return_tensors='''pt''' , padding=UpperCAmelCase__ ).input_ids
        # fmt: off
        snake_case : Optional[Any] = torch.tensor(
            [
                [ 38_530, 210_703, 256_299, 1_410, 256_298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 826, 321, 671, 25_922, 256_299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 1_460, 339, 312, 19_014, 10_620, 758, 256_299, 2_355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
                [ 517, 256_299, 14_869, 281, 301, 256_298, 275, 119_983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 320, 256_299, 14_869, 281, 2_234, 289, 2_275, 333,61_391, 289, 256_298, 543, 256_297, 168_714, 329, 256_296,274, 1],
            ] )
        # fmt: on
        torch.testing.assert_allclose(UpperCAmelCase__ , UpperCAmelCase__ )
        snake_case : List[Any] = model.generate(input_ids.to(UpperCAmelCase__ ) )
        snake_case : int = [
            '''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
            '''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
            '''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
            '''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
            '''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
        ]
        snake_case : Tuple = tokenizer.batch_decode(UpperCAmelCase__ )
        self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
| 84 | 0 |
import argparse
from collections import defaultdict
import yaml
# Path to the English documentation table of contents validated below.
_a : int = 'docs/source/en/_toctree.yml'
def a_ ( __magic_name__ ) -> list:
    """Deduplicate and alphabetically sort a model-doc ToC section.

    Args:
        __magic_name__: list of ``{"local": ..., "title": ...}`` ToC entries.

    Returns:
        The entries with exact duplicates collapsed to a single occurrence,
        sorted case-insensitively by title.

    Raises:
        ValueError: if one ``local`` key appears with several different titles.

    Fixes over the previous revision: the duplicate counter was built with
    ``defaultdict`` seeded by the input list (not a valid default factory)
    instead of ``defaultdict(int)``; the sort key lambda bound its parameter
    under one name while reading ``s`` (NameError); and the ``List[Any]``
    return annotation referenced an unimported name (now plain ``list``).
    """
    counts = defaultdict(int)
    for doc in __magic_name__:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['''title'''] for doc in __magic_name__ if doc['''local'''] == duplicate_key} )
        if len(titles) > 1:
            raise ValueError(
                F"{duplicate_key} is present several times in the documentation table of content at "
                '''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
                '''others.''' )
        # Only add this once
        new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )

    # Add none duplicate-keys
    new_doc.extend([doc for doc in __magic_name__ if counts[doc['''local''']] == 1] )

    # Sort alphabetically by title, case-insensitively.
    return sorted(new_doc , key=lambda s: s["title"].lower() )
def a_ ( __magic_name__=False ) -> Optional[int]:
    """Check that the "Models" part of the doc ToC is deduplicated/sorted.

    With a truthy argument (the ``--fix_and_overwrite`` CLI flag) the yaml
    file is rewritten in place; otherwise a ValueError is raised when the
    ToC is not clean.

    NOTE(review): several identifiers are mangled — ``open(__magic_name__…)``
    opens the boolean flag instead of the ToC path constant ``_a``, the
    cleaning helper is called as ``clean_model_doc_toc`` although it is
    defined above as ``a_``, and the repeated ``snake_case`` assignments
    overwrite each other.  Restore the original names before running.
    """
    with open(__magic_name__ , encoding='''utf-8''' ) as f:
        snake_case : Union[str, Any] = yaml.safe_load(f.read() )

    # Get to the API doc
    snake_case : Optional[Any] = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    snake_case : int = content[api_idx]['''sections''']

    # Then to the model doc
    snake_case : Optional[Any] = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    snake_case : Dict = api_doc[model_idx]['''sections''']

    snake_case : List[str] = [(idx, section) for idx, section in enumerate(__magic_name__ ) if '''sections''' in section]
    snake_case : Optional[Any] = False
    for idx, modality_doc in modalities_docs:
        snake_case : Optional[Any] = modality_doc['''sections''']
        snake_case : List[Any] = clean_model_doc_toc(__magic_name__ )
        if old_modality_doc != new_modality_doc:
            snake_case : Any = True
            if overwrite:
                snake_case : Union[str, Any] = new_modality_doc

    if diff:
        if overwrite:
            snake_case : Dict = model_doc
            snake_case : Any = api_doc
            with open(__magic_name__ , '''w''' , encoding='''utf-8''' ) as f:
                f.write(yaml.dump(__magic_name__ , allow_unicode=__magic_name__ ) )
        else:
            raise ValueError(
                '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
# CLI entry point: pass --fix_and_overwrite to rewrite the ToC in place.
# NOTE(review): the parser and parsed args are assigned to ``_a`` but read
# back as ``parser`` / ``args``, and ``check_model_doc`` is defined above as
# ``a_`` — mangled names; confirm against the original script.
if __name__ == "__main__":
    _a : List[str] = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    _a : Optional[int] = parser.parse_args()
    check_model_doc(args.fix_and_overwrite)
| 709 |
import torch
from diffusers import DiffusionPipeline
class a_ ( DiffusionPipeline ):
    """Minimal custom diffusion pipeline used to exercise pipeline loading.

    Runs a single UNet forward and scheduler step, then deterministically
    returns an all-ones tensor (the step output cancels itself out).

    Fixes over the previous revision: the undefined base-class name ``a``
    is restored to the imported ``DiffusionPipeline``, and the two
    ``__init__`` parameters — both declared under the same name, which is a
    SyntaxError — are restored to ``unet`` / ``scheduler`` to match the
    ``register_modules`` call.
    """

    def __init__( self , unet , scheduler ):
        """Register the UNet and scheduler so the pipeline can be saved/loaded."""
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )

    def __call__( self ):
        """Run one denoising step and return a ones tensor of the sample shape."""
        sample = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
        timestep = 1
        model_output = self.unet(sample , timestep ).sample
        scheduler_output = self.scheduler.step(model_output , timestep , sample ).prev_sample
        # scheduler_output - scheduler_output == 0, so the result is exactly
        # ones regardless of the random sample — keeps the dummy deterministic.
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output )
        return result
| 84 | 0 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)

# Lazy-import structure: submodule name -> list of public names.
# NOTE(review): in the upstream file this dict is named ``_import_structure``
# and each optional block *adds a key* to it; here every assignment targets
# ``_a`` (overwriting the previous value) and the final ``_LazyModule(...)``
# call reads an undefined ``_import_structure`` — mangled names that must be
# restored for this module to import.
_a : int = {'configuration_reformer': ['REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ReformerConfig']}

# Export the slow (sentencepiece-based) tokenizer only when available.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _a : int = ['ReformerTokenizer']

# Export the fast tokenizer only when the tokenizers library is available.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _a : List[str] = ['ReformerTokenizerFast']

# Export the PyTorch modeling classes only when torch is available.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _a : Optional[int] = [
        'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ReformerAttention',
        'ReformerForMaskedLM',
        'ReformerForQuestionAnswering',
        'ReformerForSequenceClassification',
        'ReformerLayer',
        'ReformerModel',
        'ReformerModelWithLMHead',
        'ReformerPreTrainedModel',
    ]

# Under static type checking, import everything eagerly so analyzers see it.
if TYPE_CHECKING:
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )
else:
    # At runtime, replace this module with a lazy proxy that imports
    # submodules on first attribute access.
    import sys

    _a : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 710 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class a_ ( ProcessorMixin ):
    """CLIP processor wrapping a CLIP image processor and a CLIP tokenizer
    into a single processor.

    Fixes over the previous revision:
    - the three ProcessorMixin class attributes were all assigned to the
      same name ``A__`` (only the last survived) — restored to
      ``attributes`` / ``image_processor_class`` / ``tokenizer_class``;
    - ``__init__`` and ``__call__`` declared several parameters under one
      shared name (a SyntaxError) — canonical names restored;
    - when both text and images were given, the computed ``pixel_values``
      were bound to a throwaway local instead of being attached to the
      returned encoding;
    - the undefined base-class name ``a`` is restored to the imported
      ``ProcessorMixin``, and the shadowed helper methods get back their
      distinct names (``batch_decode``, ``decode``, properties).
    """

    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'CLIPImageProcessor'
    tokenizer_class = ('CLIPTokenizer', 'CLIPTokenizerFast')

    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        """Store the image processor and tokenizer.

        ``feature_extractor`` is accepted as a deprecated keyword alias for
        ``image_processor``; both components are required.
        """
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )

    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        """Tokenize ``text`` and/or preprocess ``images``.

        At least one of the two must be provided.  When both are given, the
        image features' ``pixel_values`` are attached to the text encoding;
        when only images are given, a BatchEncoding of the image features is
        returned.
        """
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )

        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )

        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )

        if text is not None and images is not None:
            # Attach the image features to the text encoding (previously lost).
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )

    def batch_decode( self , *args , **kwargs ):
        """Forward all arguments to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        """Forward all arguments to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        """Union of tokenizer and image-processor input names, de-duplicated
        while preserving order."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    @property
    def feature_extractor_class( self ):
        """Deprecated alias for ``image_processor_class``."""
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
        return self.image_processor_class

    @property
    def feature_extractor( self ):
        """Deprecated alias for ``image_processor``."""
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , FutureWarning , )
        return self.image_processor
| 84 | 0 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def a_ ( __magic_name__ ) -> torch.Tensor:
    """Preprocess a PIL image into a [-1, 1] float tensor of shape (1, C, H, W).

    Args:
        __magic_name__: a ``PIL.Image.Image`` to prepare for the UNet.

    Returns:
        A ``torch.Tensor`` batch of one image, scaled from [0, 255] uint8
        to the [-1, 1] range, channels-first.

    Fixes over the previous revision: the width/height pair was assigned to
    a single throwaway local but then read back as ``w, h`` (NameError), and
    the array cast used the nonexistent ``np.floataa`` instead of
    ``np.float32``.  The misleading ``-> Tuple`` annotation is corrected too.
    """
    w, h = __magic_name__.size
    # Round each side down to the nearest multiple of 32 (UNet size constraint).
    w, h = (x - x % 32 for x in (w, h))
    image = __magic_name__.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] )
    array = np.array(image ).astype(np.float32 ) / 255.0
    # (H, W, C) -> (1, C, H, W)
    array = array[None].transpose(0 , 3 , 1 , 2 )
    tensor = torch.from_numpy(array )
    # Map [0, 1] -> [-1, 1].
    return 2.0 * tensor - 1.0
class a_ ( DiffusionPipeline ):
    """Pipeline for image super-resolution with a latent diffusion model.

    Conditions the UNet on a low-resolution image by channel-concatenating it
    with the latents at every denoising step, then decodes with the VQ-VAE.

    Fixes over the previous revision: the undefined base-class name ``a`` is
    restored to ``DiffusionPipeline``, and the parameters of ``__init__`` /
    ``__call__`` — declared several times under the same name, which is a
    SyntaxError — get back their canonical names.
    """

    def __init__( self , vqvae : VQModel , unet : UNetaDModel , scheduler : Union[
        DDIMScheduler,
        PNDMScheduler,
        LMSDiscreteScheduler,
        EulerDiscreteScheduler,
        EulerAncestralDiscreteScheduler,
        DPMSolverMultistepScheduler,
    ] , ):
        """Register the VQ-VAE decoder, UNet and noise scheduler."""
        super().__init__()
        self.register_modules(vqvae=vqvae , unet=unet , scheduler=scheduler )

    @torch.no_grad()
    def __call__( self , image : Union[torch.Tensor, PIL.Image.Image] = None , batch_size : Optional[int] = 1 , num_inference_steps : Optional[int] = 100 , eta : Optional[float] = 0.0 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , ):
        """Run super-resolution on ``image``.

        Args:
            image: a single PIL image or a (B, C, H, W) tensor batch.
            batch_size: overridden by the input (1 for a PIL image, the
                tensor's leading dim otherwise).
            num_inference_steps: number of denoising steps.
            eta: DDIM ``eta``; ignored by schedulers that do not accept it.
            generator: RNG(s) for the initial latents.
            output_type: "pil" to convert the result to PIL images.
            return_dict: return an ImagePipelineOutput instead of a tuple.

        Raises:
            ValueError: when ``image`` is neither a PIL image nor a tensor.
        """
        if isinstance(image , PIL.Image.Image ):
            batch_size = 1
        elif isinstance(image , torch.Tensor ):
            batch_size = image.shape[0]
        else:
            raise ValueError(F"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image )}" )

        if isinstance(image , PIL.Image.Image ):
            # NOTE(review): ``preprocess`` is the module-level resize/normalize
            # helper (named ``a_`` in this revision) — confirm the name.
            image = preprocess(image )

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters() ).dtype
        latents = randn_tensor(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
        image = image.to(device=self.device , dtype=latents_dtype )

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs['''eta'''] = eta

        for t in self.progress_bar(timesteps_tensor ):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image] , dim=1 )
            latents_input = self.scheduler.scale_model_input(latents_input , t )
            # predict the noise residual
            noise_pred = self.unet(latents_input , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_kwargs ).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents ).sample
        image = torch.clamp(image , -1.0 , 1.0 )
        image = image / 2 + 0.5
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image )
| 711 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_a : Optional[Any] = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
_a : str = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
_a : List[Any] = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. 
Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
    # `datasets.Metric` wrapper around the reference MAUVE implementation
    # (https://github.com/krishnap25/mauve): compares generated text against
    # reference text via quantized GPT-2 feature distributions.
    # NOTE(review): the decorator reads `_DESCRIPTION` and `_KWARGS_DESCRIPTION`,
    # while only `_a` is assigned above — presumably these globals exist earlier
    # in the file under their original names; confirm.
    def lowerCAmelCase( self : Any ):
        """Describe the metric: string ``predictions``/``references`` features,
        plus citation, homepage and codebase links."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''' , id='''sequence''' ),
                    '''references''': datasets.Value('''string''' , id='''sequence''' ),
                } ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
                '''https://arxiv.org/abs/2102.01454''',
                '''https://github.com/krishnap25/mauve''',
            ] , )

    # NOTE(review): several renaming artifacts are left untouched here:
    # (1) this second `lowerCAmelCase` shadows the one above; (2) every parameter
    # is named `UpperCAmelCase__`, which is a SyntaxError; (3) the result of
    # `compute_mauve` is bound to a local while the undefined name `out` is
    # returned. The file-level rename needs to be reverted for this to run.
    def lowerCAmelCase( self : int , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : List[str]="auto" , UpperCAmelCase__ : Tuple=-1 , UpperCAmelCase__ : Optional[int]=0.9 , UpperCAmelCase__ : List[Any]=5 , UpperCAmelCase__ : List[Any]=500 , UpperCAmelCase__ : Union[str, Any]="gpt2-large" , UpperCAmelCase__ : Optional[Any]=-1 , UpperCAmelCase__ : int=1_024 , UpperCAmelCase__ : List[Any]=25 , UpperCAmelCase__ : Union[str, Any]=5 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : List[Any]=25 , ):
        """Compute MAUVE between generated and reference text by delegating to
        ``compute_mauve`` from the reference package."""
        snake_case : List[str] = compute_mauve(
            p_text=UpperCAmelCase__ , q_text=UpperCAmelCase__ , p_features=UpperCAmelCase__ , q_features=UpperCAmelCase__ , p_tokens=UpperCAmelCase__ , q_tokens=UpperCAmelCase__ , num_buckets=UpperCAmelCase__ , pca_max_data=UpperCAmelCase__ , kmeans_explained_var=UpperCAmelCase__ , kmeans_num_redo=UpperCAmelCase__ , kmeans_max_iter=UpperCAmelCase__ , featurize_model_name=UpperCAmelCase__ , device_id=UpperCAmelCase__ , max_text_length=UpperCAmelCase__ , divergence_curve_discretization_size=UpperCAmelCase__ , mauve_scaling_factor=UpperCAmelCase__ , verbose=UpperCAmelCase__ , seed=UpperCAmelCase__ , )
        return out
| 84 | 0 |
def a_ ( __magic_name__ ) -> None:
    """Print the length of the longest path in a DAG (Kahn's topological order).

    Args:
        __magic_name__: adjacency list mapping each vertex to its successors;
            vertices must be the integers ``0 .. len(graph) - 1``.

    Fixes: the body referenced the pre-rename name ``graph``, pushed the whole
    graph onto the queue instead of the vertex, and printed ``max`` of the graph
    instead of the distance table.
    """
    indegree = [0] * len(__magic_name__)
    queue = []
    # long_dist[v] = number of vertices on the longest path ending at v
    long_dist = [1] * len(__magic_name__)

    for values in __magic_name__.values():
        for i in values:
            indegree[i] += 1

    # start from every source vertex (indegree 0)
    for i in range(len(__magic_name__)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in __magic_name__[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))
# Adjacency list of the example DAG (vertex -> list of successors).
_a = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
# `longest_distance` / `graph` exist in this file under the names `a_` / `_a`;
# the original call referenced the pre-rename identifiers and raised NameError.
# (The bogus `Optional[int]` annotation was dropped: `Optional` is not imported
# here and the value is a dict, not an int.)
a_(_a)
| 712 |
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def a_ ( __magic_name__ ) -> List[Any]:
    """Translate one original ViTMAE checkpoint key into the Hugging Face
    ``ViTMAEForPreTraining`` naming scheme.

    The rewrites are applied in a fixed order; a rule fires only when its old
    substring is present and its "unless" substring (third column) is absent,
    mirroring the original chain of ``if`` statements.
    """
    # (old substring, replacement, substring that must be absent — or None)
    rewrite_rules = (
        ("cls_token", "vit.embeddings.cls_token", None),
        ("mask_token", "decoder.mask_token", None),
        ("decoder_pos_embed", "decoder.decoder_pos_embed", None),
        ("pos_embed", "vit.embeddings.position_embeddings", "decoder"),
        ("patch_embed.proj", "vit.embeddings.patch_embeddings.projection", None),
        ("patch_embed.norm", "vit.embeddings.norm", None),
        ("decoder_blocks", "decoder.decoder_layers", None),
        ("blocks", "vit.encoder.layer", None),
        ("attn.proj", "attention.output.dense", None),
        ("attn", "attention.self", None),
        ("norm1", "layernorm_before", None),
        ("norm2", "layernorm_after", None),
        ("mlp.fc1", "intermediate.dense", None),
        ("mlp.fc2", "output.dense", None),
        ("decoder_embed", "decoder.decoder_embed", None),
        ("decoder_norm", "decoder.decoder_norm", None),
        ("decoder_pred", "decoder.decoder_pred", None),
        ("norm.weight", "vit.layernorm.weight", "decoder"),
        ("norm.bias", "vit.layernorm.bias", "decoder"),
    )

    renamed = __magic_name__
    for old, new, unless in rewrite_rules:
        if old in renamed and (unless is None or unless not in renamed):
            renamed = renamed.replace(old, new)
    return renamed
def a_ ( orig_state_dict , config ):
    """Split fused ``qkv`` projections of a ViTMAE checkpoint into separate
    query/key/value tensors named for the HF architecture.

    Args:
        orig_state_dict: checkpoint state dict; modified in place and returned.
        config: model config providing ``hidden_size`` / ``decoder_hidden_size``.

    Fixes: the original declared two parameters with the same name (SyntaxError)
    and bound every split tensor to a throwaway local, so the popped entries were
    never written back and the function returned an emptied dict.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
            # the fused tensor is laid out [query; key; value] along dim 0
            if "weight" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            # NOTE(review): upstream routes non-qkv keys through the rename
            # helper; in this file that helper was also renamed to `a_` and is
            # shadowed by this definition, so the entry is kept under its
            # original key — restore distinct function names to re-enable it.
            orig_state_dict[key] = val
    return orig_state_dict
def a_ ( checkpoint_url , pytorch_dump_folder_path ):
    """Download an original ViTMAE checkpoint, convert it to the HF format,
    sanity-check the logits on a reference image, and save model + processor.

    Args:
        checkpoint_url: URL of the original ViTMAE ``.pth`` checkpoint.
        pytorch_dump_folder_path: output directory for the converted artifacts.

    Fixes: the original declared two parameters with the same name (SyntaxError),
    assigned the large/huge hyper-parameters to throwaway locals instead of the
    config, and passed a non-boolean to ``stream=``.
    """
    config = ViTMAEConfig()
    # Scale the architecture for the large/huge variants.
    if "large" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1_280
        config.intermediate_size = 5_120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    # NOTE(review): `convert_state_dict` is defined above in this file under the
    # name `a_`, which this definition shadows — restore distinct function names
    # at file level for this call to resolve.
    new_state_dict = convert_state_dict(state_dict, config)
    model.load_state_dict(new_state_dict)
    model.eval()

    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass — seeded because ViTMAE's masking is random
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]]
        )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]]
        )
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]]
        )

    # verify logits against the reference values before saving
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # The original bound the parser to `_a` and then called `parser.add_argument`
    # and the pre-rename `convert_vit_mae_checkpoint` — both NameErrors.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    # `convert_vit_mae_checkpoint` exists in this file under the name `a_`.
    a_(args.checkpoint_url, args.pytorch_dump_folder_path)
| 84 | 0 |
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
# All paths are set with the intent you should run this script from the root of
# the repo with the command: python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
# (The original bound every constant to `_a`, leaving `PATH_TO_TRANSFORMERS`,
# `spec`, `transformers`, `CONFIG_MAPPING`, `_re_checkpoint` and the ignore set
# — all referenced below — undefined.)
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
# (raw string: `\[` etc. are invalid escapes in a plain literal)
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfigMixin",
    "DecisionTransformerConfigMixin",
    "EncoderDecoderConfigMixin",
    "RagConfigMixin",
    "SpeechEncoderDecoderConfigMixin",
    "VisionEncoderDecoderConfigMixin",
    "VisionTextDualEncoderConfigMixin",
}
def a_ ( ):
    """Raise ValueError listing every config class whose docstring does not
    mention a valid ``[name](https://huggingface.co/name)`` checkpoint link.

    Fixes: the original referenced the undefined ``__magic_name__`` inside a
    zero-argument function, never unpacked the ``(name, link)`` checkpoint
    tuple, and never bound ``name`` before using it.
    """
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a (name, link) tuple, e.g.
            # ('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
    # `check_config_docstrings_have_checkpoints` exists in this file as `a_`;
    # the original called the pre-rename name and raised NameError.
    a_()
| 713 |
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_a : Optional[Any] = 16
_a : Union[str, Any] = 32
def a_ ( accelerator , batch_size = 16 ):
    """Build the GLUE/MRPC train and eval ``DataLoader``s for the example.

    Args:
        accelerator: the ``Accelerator`` driving the run (used for the
            main-process-first barrier and padding decisions).
        batch_size: per-device batch size for both loaders.

    Returns:
        (train_dataloader, eval_dataloader)

    Fixes: the original declared two parameters with the same name
    (SyntaxError) and bound the tokenizer/dataset to throwaway locals, so
    every later reference (`accelerator`, `datasets`, `tokenized_datasets`,
    `max_length`, `pad_to_multiple_of`) was undefined.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    # Rebind the dataloader factory to the mocked version; the `F811`
    # (redefinition) suppression on the original line shows this assignment
    # targeted `get_dataloaders`, not the throwaway `_a` it was renamed to.
    get_dataloaders = mocked_dataloaders  # noqa: F811
def a_ ( config , args ):
    """Train and evaluate BERT on GLUE/MRPC, retrying with a smaller batch size
    on out-of-memory errors via ``find_executable_batch_size``.

    Args:
        config: dict with ``lr``, ``num_epochs``, ``seed`` and ``batch_size``.
        args: parsed CLI namespace providing ``cpu`` and ``mixed_precision``.

    Fixes: the original declared two parameters with the same name
    (SyntaxError) and bound every intermediate (accelerator, lr, model,
    optimizer, dataloaders, ...) to throwaway locals, leaving the inner loop
    full of undefined names.
    """
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        # NOTE(review): `get_dataloaders` is the factory defined above (renamed
        # `a_` in this file); restore distinct names for this call to resolve.
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(predictions=predictions, references=references)

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def a_ ( ):
    """CLI entry point: parse arguments and launch the training function.

    Fixes: the original referenced the undefined ``__magic_name__`` for
    ``type=``/``default=`` and for the final call's arguments inside a
    zero-argument function.
    """
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    # NOTE(review): `training_function` is defined above as `a_` in this file;
    # restore distinct function names for this call to resolve.
    training_function(config, args)
if __name__ == "__main__":
    # `main` exists in this file under the name `a_`; the original called the
    # pre-rename `main()` and raised NameError.
    a_()
| 84 | 0 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


# Lazy-import structure: submodule name -> list of public symbols.
# (The original bound every update to a throwaway `_a`, so the
# `_import_structure` consumed by `_LazyModule` below was never defined.)
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_encoder_decoder import EncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encoder_decoder import EncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_encoder_decoder import TFEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel

else:
    import sys

    # Replace this module with a lazy proxy so the heavy submodules are only
    # imported on first attribute access (`sys` was imported but unused, and the
    # `_LazyModule` result was bound to `_a` instead of installed here).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 714 |
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
_a : Dict = logging.get_logger(__name__)
def a_ ( box , width , height ):
    """Scale an absolute ``[x0, y0, x1, y1]`` bounding box into the 0-1000
    coordinate space used by LayoutLM-style models.

    Args:
        box: absolute pixel coordinates ``[left, top, right, bottom]``.
        width: image width in pixels.
        height: image height in pixels.

    Fixes: the original declared three parameters with the same name, which is
    a SyntaxError in Python.
    """
    return [
        int(1_000 * (box[0] / width)),
        int(1_000 * (box[1] / height)),
        int(1_000 * (box[2] / width)),
        int(1_000 * (box[3] / height)),
    ]
def a_ ( image , lang , tesseract_config = None ):
    """Run Tesseract OCR on ``image`` and return ``(words, normalized_boxes)``,
    where each box is ``[x0, y0, x1, y1]`` scaled to the 0-1000 range.

    Args:
        image: input image (numpy array or PIL image).
        lang: Tesseract language code, or None for the default.
        tesseract_config: extra Tesseract CLI flags; defaults to "".

    Fixes: the original declared three parameters with the same name
    (SyntaxError) and discarded every intermediate into throwaway locals.
    """
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = {idx for idx, word in enumerate(words) if not word.strip()}
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = [[x, y, x + w, y + h] for x, y, w, h in zip(left, top, width, height)]

    # finally, normalize the bounding boxes to the 0-1000 range
    # (inlined from the `normalize_box` helper above, whose `a_` name is
    # shadowed by this definition in this file)
    normalized_boxes = [
        [
            int(1_000 * (box[0] / image_width)),
            int(1_000 * (box[1] / image_height)),
            int(1_000 * (box[2] / image_width)),
            int(1_000 * (box[3] / image_height)),
        ]
        for box in actual_boxes
    ]

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class a_ ( BaseImageProcessor ):
    """Image processor for LayoutLM-style models: resizes images, optionally
    runs Tesseract OCR to extract words plus 0-1000-normalized boxes, and flips
    channels RGB -> BGR (as Detectron2 requires).

    Fixes: the base class was the undefined name ``a`` (``BaseImageProcessor``
    is imported above and is the evident intent); ``__init__`` bound every
    setting to throwaway locals instead of ``self``; and both methods shared
    one name, leaving the ``self.resize`` call below unresolvable.
    """

    # presumably the renamed `model_input_names` class attribute — TODO confirm
    A__ : int = ['pixel_values']

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        # Persist the configuration on the instance so `preprocess` defaults work.
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize ``image`` to ``(size["height"], size["width"])``.

        Named ``resize`` (instead of the duplicated ``lowerCAmelCase``) because
        the preprocessing method below invokes it as ``self.resize``.
        """
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        # delegates to the module-level `resize` imported from image_transforms
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def lowerCAmelCase(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        apply_ocr: Optional[bool] = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Preprocess one or several images; any argument left as ``None``
        falls back to the value configured in ``__init__``. Returns a
        ``BatchFeature`` with ``pixel_values`` (and ``words``/``boxes`` when
        OCR is enabled)."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.'''
            )
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, '''pytesseract''')
            words_batch = []
            boxes_batch = []
            for image in images:
                # NOTE(review): `apply_tesseract` is the module-level OCR helper
                # defined above (renamed `a_` in this file); confirm resolution.
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={'''pixel_values''': images}, tensor_type=return_tensors)
        if apply_ocr:
            data['''words'''] = words_batch
            data['''boxes'''] = boxes_batch
        return data
| 84 | 0 |
'''simple docstring'''
def a_ ( __magic_name__ ) -> int:
    """Return the sum of the decimal digits of a non-negative integer.

    Fixes: the original accumulated into the undefined name ``digit_sum`` and
    divided the undefined name ``num`` (renaming artifacts).
    """
    num = __magic_name__
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum
def a_ ( __magic_name__ = 100 ) -> int:
    """Project Euler 65: digit sum of the numerator of the ``max_n``-th
    convergent of the continued fraction for e.

    The continued fraction of e is [2; 1, 2, 1, 1, 4, 1, 1, 6, ...]: every
    third partial quotient (i % 3 == 0) is 2*i/3, the rest are 1. Numerators
    follow n_i = a_i * n_{i-1} + n_{i-2}.

    Fixes: the loop body referenced the undefined names ``pre_numerator``,
    ``e_cont`` and ``temp`` (renaming artifacts), and the digit-sum helper it
    called is shadowed by this definition's name, so it is inlined below.
    """
    max_n = __magic_name__
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    # digit sum, inlined from the shadowed `sum_digits` helper above
    return sum(int(digit) for digit in str(cur_numerator))
if __name__ == "__main__":
    # `solution` exists in this file under the name `a_`; the original f-string
    # called the pre-rename `solution()` and raised NameError.
    print(f"{a_() = }")
| 715 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class a_ :
    """Builder of tiny AST (Audio Spectrogram Transformer) configs and inputs
    for the model tests.

    Fixes: the original ``__init__`` declared every parameter with the same
    name (SyntaxError) and bound the values to throwaway locals instead of
    ``self``; the four methods all shared one name, so the internal calls
    ``self.prepare_config_and_inputs()`` / ``self.get_config()`` (and the
    ``prepare_config_and_inputs_for_common`` call made by the test class
    below) could not resolve.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        """Random spectrogram inputs (and labels, when enabled) plus a config."""
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, input_values, labels

    def get_config(self):
        """Build an ``ASTConfig`` from the stored hyper-parameters."""
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )

    # presumably the conventional `create_and_check_model` tester method — TODO confirm name
    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        """Return ``(config, inputs_dict)`` as expected by the common test mixin."""
        config, input_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class a_ ( a , unittest.TestCase ):
    """Common model / pipeline tests for the Audio Spectrogram Transformer.

    NOTE(review): the original class statement listed the base `a` twice
    (`class a_ ( a , a , unittest.TestCase )`), which raises
    'TypeError: duplicate base class'; upstream these were two distinct mixins
    (model tester + pipeline tester) -- restore both names if known. Method and
    attribute names are restored to the unittest / mixin names they must carry
    to be discovered and called.
    """

    # Model classes exercised by the common tests (attr name read by the mixin).
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'''audio-classification''': ASTForAudioClassification, '''feature-extraction''': ASTModel}
        if is_torch_available()
        else {}
    )
    # NOTE(review): mapping of the four anonymous `A__ = False` flags onto the
    # upstream names -- confirm against the mixin.
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        """Skip the audio-classification pipeline tests.

        The original declared five parameters all named `UpperCAmelCase__`
        (a SyntaxError) while the body read `pipeline_test_casse_name`.
        """
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False

    def setUp( self ):
        """Create the model tester and the config tester."""
        self.model_tester = ASTModelTester(self )
        # NOTE(review): the original passed the undefined `UpperCAmelCase__` for
        # config_class / has_text_modality; ASTConfig / False match upstream.
        self.config_tester = ConfigTester(self , config_class=ASTConfig , has_text_modality=False , hidden_size=37 )

    def test_config( self ):
        """Run the common config sanity checks."""
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''AST does not use inputs_embeds''' )
    def test_inputs_embeds( self ):
        """AST consumes spectrograms directly; there is no inputs_embeds path."""
        pass

    def test_model_common_attributes( self ):
        """Input embeddings exist; output embeddings are absent or nn.Linear."""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )

    def test_forward_signature( self ):
        """The forward signature starts with `input_values`."""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''input_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )

    def test_model( self ):
        """End-to-end shape check through ASTModel."""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        """Smoke-test loading the released pretrained checkpoint."""
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_audio() -> Dict:
    """Download the sample FLAC clip and return (waveform, sampling_rate).

    Renamed from the repeatedly-shadowed `a_`: the integration test calls
    `prepare_audio()`. The original `snake_case , snake_case : int = ...`
    annotated tuple-unpack is a SyntaxError; bind the two values explicitly.
    NOTE(review): the `-> Dict` annotation is kept from the original but the
    function actually returns a tuple.
    """
    filepath = hf_hub_download(
        repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' )
    audio, sampling_rate = torchaudio.load(filepath )
    return audio, sampling_rate
@require_torch
@require_torchaudio
class a_ ( unittest.TestCase ):
    """Integration test: run the audioset-finetuned AST checkpoint on a real clip."""

    @cached_property
    def default_feature_extractor( self ):
        """Feature extractor for the checkpoint (None without torchaudio).

        Renamed from `lowerCAmelCase`: the test below reads
        `self.default_feature_extractor`.
        """
        return (
            ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' )
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification( self ):
        """Check the pretrained classifier's logits on the sample clip.

        Renamed from `lowerCAmelCase` so unittest discovers it. The original's
        annotated tuple-unpack of `prepare_audio()` was a SyntaxError.
        NOTE(review): `torch_device` is assumed imported from
        transformers.testing_utils; `prepare_audio` must be the module-level
        helper defined above -- confirm both names.
        """
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(torch_device )
        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio , sampling_rate=sampling_rate , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 527) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 84 | 0 |
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHAaHash:
    """Pure-Python SHA-1 (FIPS 180-4) -- for study only; use hashlib in production.

    Renamed from `a_`: every call site in this module uses `SHAaHash(...)`.
    Fixes vs the original: the 32-bit masks were written as 0xFFFFFFF (28 bits),
    per-block results were assigned to throwaway locals instead of instance
    state, and the methods were defined under names the call sites never used.
    """

    def __init__( self , data ):
        """Store the message (bytes) and initialise the five 32-bit state words."""
        self.data = data
        # Standard SHA-1 initialisation vector.
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate( n , b ):
        """Rotate the 32-bit integer `n` left by `b` bits."""
        # Mask must be the full 32-bit 0xFFFFFFFF; the original's 0xFFFFFFF
        # dropped the top nibble and corrupted every rotation.
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding( self ):
        """Return the message padded to a multiple of 64 bytes (FIPS 180-4 5.1.1)."""
        padding = b'''\x80''' + b'''\x00''' * (63 - (len(self.data ) + 8) % 64)
        return self.data + padding + struct.pack('''>Q''' , 8 * len(self.data ) )

    def split_blocks( self ):
        """Split the padded message into 64-byte blocks."""
        return [
            self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
        ]

    def expand_block( self , block ):
        """Expand one 64-byte block into the 80-word message schedule."""
        w = list(struct.unpack('''>16L''' , block ) ) + [0] * 64
        for i in range(16 , 80 ):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
        return w

    def final_hash( self ):
        """Run the compression function over every block and return the hex digest."""
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block )
            a, b, c, d, e = self.h
            for i in range(0 , 80 ):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                else:  # 60 <= i < 80
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                # `+` binds tighter than `&`, so the whole sum is masked.
                a, b, c, d, e = (
                    self.rotate(a , 5 ) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b , 30 ),
                    c,
                    d,
                )
            # Fold the block result back into the running state -- the original
            # assigned this tuple to a throwaway local, so the state never updated.
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h )
def a_( ) -> int:
    """Self-test: the pure-Python digest must match hashlib's SHA-1.

    Fixes two NameErrors in the original: `hashlib.shaa` does not exist
    (`hashlib.sha1` was intended) and the message was bound to a throwaway local.
    """
    msg = b'''Test String'''
    assert SHAaHash(msg ).final_hash() == hashlib.sha1(msg ).hexdigest()  # noqa: S324
def a_( ) -> Optional[Any]:
    """CLI: print the SHA-1 digest of --string (default) or the contents of --file.

    The original bound `parser`, `args` and the hash input to the throwaway
    `snake_case` name and then read them -- every line raised NameError.
    """
    parser = argparse.ArgumentParser(description='''Process some strings or files''' )
    parser.add_argument(
        '''--string''' , dest='''input_string''' , default='''Hello World!! Welcome to Cryptography''' , help='''Hash the string''' , )
    parser.add_argument('''--file''' , dest='''input_file''' , help='''Hash contents of a file''' )
    args = parser.parse_args()
    hash_input = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , '''rb''' ) as f:
            hash_input = f.read()
    else:
        hash_input = bytes(hash_input , '''utf-8''' )
    print(SHAaHash(hash_input ).final_hash() )
if __name__ == "__main__":
    main()

    # Run the doctests only when executed as a script; the original called
    # doctest.testmod() at module level, so it ran on every import.
    import doctest

    doctest.testmod()
| 716 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_a : Union[str, Any] = logging.getLogger(__name__)
def a_( preds , labels ) -> float:
    """Fraction of positions where `preds` equals `labels` (element-wise accuracy).

    The original declared both parameters as `__magic_name__`, which is a
    SyntaxError (duplicate argument); callers pass (predictions, label_ids)
    positionally.
    """
    return (preds == labels).mean()
@dataclass
class a_ :
    """Arguments pertaining to which model/config/tokenizer we fine-tune from."""

    # Required: model identifier or local path.
    A__ : str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    # NOTE(review): `default=a` references a module-level name `a` that is not
    # visible here (presumably None upstream) -- verify it is defined/imported.
    A__ : Optional[str] = field(
        default=a , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    A__ : Optional[str] = field(
        default=a , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    A__ : Optional[str] = field(
        default=a , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    # NOTE(review): the field names were anonymised to `A__`; main() reads
    # model_name_or_path / config_name / tokenizer_name / cache_dir.
@dataclass
class a_ :
    """Arguments pertaining to what data we input to the model for training/eval."""

    # Required: task key into `processors` and the directory with its data files.
    A__ : str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys() )} )
    A__ : str = field(metadata={'help': 'Should contain the data files for the task.'} )
    A__ : int = field(
        default=128 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    # NOTE(review): `default=a` references a name not defined in this view
    # (presumably False upstream); main() reads task_name / data_dir /
    # max_seq_length / overwrite_cache from this dataclass.
    A__ : bool = field(
        default=a , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def a_( ) -> Dict:
    """Train/evaluate a multiple-choice model from command-line arguments.

    Returns:
        Dict of evaluation metrics (empty unless --do_eval is passed).

    The original bound every local to `snake_case` and used annotated
    tuple-unpack targets (a SyntaxError); the rest of the body already reads
    `model_args`/`data_args`/`training_args` etc., so those names are restored.
    """
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            ''' --overwrite_output_dir to overcome.''' )
    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    # NOTE(review): `fpaa` is presumably `fp16` upstream -- verify on TrainingArguments.
    logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''' , training_args )
    # Set seed
    set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list )
    except KeyError:
        raise ValueError('''Task not found: %s''' % (data_args.task_name) )
    # Load pretrained model and tokenizer. The .from_pretrained methods guarantee
    # that only one local process downloads model & vocab in distributed training.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p ) -> Dict:
        # Accuracy over argmax predictions; inlined instead of calling the
        # module-level accuracy helper because its name (`a_`) is rebound by
        # later definitions in this file.
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": (preds == p.label_ids).mean()}

    # Data collator: pad to multiples of 8 for tensor-core efficiency under fp16.
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fpaa else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        trainer.save_model()
        # Re-save the tokenizer so the output dir is directly shareable on the Hub.
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , '''eval_results.txt''' )
        if trainer.is_world_master():
            with open(output_eval_file , '''w''' ) as writer:
                logger.info('''***** Eval results *****''' )
                for key, value in result.items():
                    logger.info(''' %s = %s''' , key , value )
                    writer.write('''%s = %s\n''' % (key, value) )
            results.update(result )
    return results
def a_( __magic_name__ ) -> List[Any]:
    """Entry point for xla_spawn / xmp.spawn; the worker index argument is ignored."""
    # All setup (argument parsing, model construction, training) lives in main().
    main()
# Standard CLI entry point.
if __name__ == "__main__":
    main()
| 84 | 0 |
_a : List[str] = 0 # The first color of the flag.
_a : Any = 1 # The second color of the flag.
_a : Union[str, Any] = 2 # The third color of the flag.
_a : Union[str, Any] = (red, white, blue)
def a_ ( __magic_name__ ) -> list:
"""simple docstring"""
if not sequence:
return []
if len(__magic_name__ ) == 1:
return list(__magic_name__ )
snake_case : Tuple = 0
snake_case : Any = len(__magic_name__ ) - 1
snake_case : str = 0
while mid <= high:
if sequence[mid] == colors[0]:
snake_case : Optional[Any] = sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
snake_case : List[str] = sequence[high], sequence[mid]
high -= 1
else:
snake_case : int = F"The elements inside the sequence must contains only {colors} values"
raise ValueError(__magic_name__ )
return sequence
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # The original bound both locals to `_a` and then read `user_input` /
    # `unsorted`, and called the non-existent `dutch_national_flag_sort`
    # (the sorter in this module is named `a_`).
    user_input = input('Enter numbers separated by commas:\n').strip()
    unsorted = [int(item.strip()) for item in user_input.split(',')]
    print(f"{a_(unsorted)}")
| 717 |
import re
def a_( __magic_name__ ) -> bool:
    """Return True when `__magic_name__` is a valid Sri Lankan mobile number.

    Accepted prefixes: 0 / 94 / +94 / 0094, followed by 7X (X in 0-2 or 4-8),
    an optional '-' or ' ' separator, then seven digits.
    """
    pattern = re.compile(
        R'''^(?:0|94|\+94|0{2}94)''' R'''7(0|1|2|4|5|6|7|8)''' R'''(-| |)''' R'''\d{7}$''' )
    # The original compiled this pattern but then called
    # re.search(phone, phone) -- using the *input* as the regex -- so almost
    # any string validated. Match against the compiled pattern instead.
    return bool(pattern.search(__magic_name__ ) )
if __name__ == "__main__":
    # Demo: a sample number in the 0094XXXXXXXXX international-prefix form.
    # The original read the undefined names `phone` and
    # `is_sri_lankan_phone_number` (the validator here is `a_`).
    phone = '0094702343221'
    print(a_(phone))
| 84 | 0 |
from jiwer import compute_measures
import datasets
_a : int = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
_a : str = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
_a : Tuple = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> wer = datasets.load_metric("wer")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
    """Word Error Rate (WER) metric computed with jiwer.

    Methods renamed to `_info` / `_compute`: `datasets.Metric` dispatches to
    those names, so the original `lowerCAmelCase` definitions were never called.
    """

    def _info( self : Dict ):
        """Metric metadata: string features, citation, and reference URLs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''' , id='''sequence''' ),
                    '''references''': datasets.Value('''string''' , id='''sequence''' ),
                } ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
                '''https://en.wikipedia.org/wiki/Word_error_rate''',
            ] , )

    def _compute( self , predictions=None , references=None , concatenate_texts=False ):
        """Compute WER of `predictions` against `references`.

        The original signature declared all three parameters as
        `UpperCAmelCase__` (a SyntaxError); names follow the documented kwargs.
        With `concatenate_texts`, jiwer scores the whole corpus at once;
        otherwise error counts are accumulated pair by pair.
        """
        if concatenate_texts:
            # jiwer's argument order is (truth, hypothesis).
            return compute_measures(references , predictions )["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions , references ):
                measures = compute_measures(reference , prediction )
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
| 718 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class a_ ( unittest.TestCase ):
    """Holds the hyper-parameters for the LayoutLMv3 image-processor tests."""

    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , apply_ocr=True ):
        """Record the settings; `size` defaults to 18x18.

        The original declared every parameter as `UpperCAmelCase__` -- a
        SyntaxError (duplicate argument). Names/defaults follow the original
        assignment order.
        """
        self.size = size if size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict( self ):
        """Return kwargs for constructing a LayoutLMvaImageProcessor.

        Renamed from `lowerCAmelCase`: the test class calls
        `prepare_image_processor_dict()`.
        """
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class a_ ( a , unittest.TestCase ):
    # NOTE(review): every method in this class is named `lowerCAmelCase`, so each
    # definition shadows the previous one and unittest discovers no `test_*`
    # methods; upstream these were setUp / image_processor_dict / test_* names.
    # Several bodies also read names (`LayoutLMvaImageProcessingTester`,
    # `image_processor`, `self.image_processor_tester`) that are never bound
    # because assignments go to the throwaway `snake_case` -- verify upstream.

    # Image-processor class under test; None when pytesseract is unavailable.
    A__ : List[Any] = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def lowerCAmelCase( self : Dict ):
        """Create the tester object holding the image-processor hyper-parameters."""
        snake_case : Optional[Any] = LayoutLMvaImageProcessingTester(self )

    @property
    def lowerCAmelCase( self : Dict ):
        """Kwargs dict used to build the image processor in each test."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def lowerCAmelCase( self : List[Any] ):
        """The processor exposes do_resize / size / apply_ocr attributes."""
        snake_case : List[str] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(UpperCAmelCase__ , '''do_resize''' ) )
        self.assertTrue(hasattr(UpperCAmelCase__ , '''size''' ) )
        self.assertTrue(hasattr(UpperCAmelCase__ , '''apply_ocr''' ) )

    def lowerCAmelCase( self : Optional[int] ):
        """from_dict honours the stored size and an explicit `size` override."""
        snake_case : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
        snake_case : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )

    def lowerCAmelCase( self : str ):
        """Intentionally empty (upstream skips the common serialization test here)."""
        pass
    def lowerCAmelCase( self : Dict ):
        """PIL input: single image and batch yield pixel_values of the configured size."""
        # Initialize image_processing
        snake_case : Tuple = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        # NOTE(review): `UpperCAmelCase__`, `image_inputs` and `image_processing`
        # are unbound in this method (assignments above go to `snake_case`);
        # upstream this was prepare_image_inputs(..., equal_resolution=False).
        snake_case : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase__ , Image.Image )
        # Test not batched input
        snake_case : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' )
        self.assertEqual(
            encoding.pixel_values.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        # OCR output accompanies the pixel values when apply_ocr is on.
        self.assertIsInstance(encoding.words , UpperCAmelCase__ )
        self.assertIsInstance(encoding.boxes , UpperCAmelCase__ )
        # Test batched
        snake_case : Dict = image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
    def lowerCAmelCase( self : Union[str, Any] ):
        """Numpy input: single image and batch yield pixel_values of the configured size."""
        # Initialize image_processing
        snake_case : List[str] = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        # NOTE(review): `UpperCAmelCase__`, `image_inputs` and `image_processing`
        # are unbound here (assignments go to `snake_case`) -- see upstream.
        snake_case : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase__ , np.ndarray )
        # Test not batched input
        snake_case : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        # Test batched
        snake_case : List[Any] = image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
    def lowerCAmelCase( self : Optional[Any] ):
        """Torch input: single image and batch yield pixel_values of the configured size."""
        # Initialize image_processing
        snake_case : str = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        # NOTE(review): `UpperCAmelCase__`, `image_inputs` and `image_processing`
        # are unbound here (assignments go to `snake_case`) -- see upstream.
        snake_case : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )
        # Test not batched input
        snake_case : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        # Test batched
        snake_case : Tuple = image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
    def lowerCAmelCase( self : Optional[Any] ):
        """Integration: Tesseract OCR words/boxes on a real DocVQA page, then apply_ocr=False."""
        # with apply_OCR = True
        snake_case : int = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        snake_case : List[Any] = load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' )
        # NOTE(review): `ds`, `image_processing`, `encoding` and `UpperCAmelCase__`
        # below are read via their upstream names but assigned to `snake_case`
        # above -- NameErrors at runtime; restore the bindings upstream uses.
        snake_case : List[Any] = Image.open(ds[0]['''file'''] ).convert('''RGB''' )
        snake_case : Any = image_processing(UpperCAmelCase__ , return_tensors='''pt''' )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
        self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        snake_case : Optional[Any] = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', 
        '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
        snake_case : Union[str, Any] = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 
        447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
        # fmt: on
        self.assertListEqual(encoding.words , UpperCAmelCase__ )
        self.assertListEqual(encoding.boxes , UpperCAmelCase__ )
        # with apply_OCR = False
        snake_case : str = LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase__ )
        snake_case : Optional[Any] = image_processing(UpperCAmelCase__ , return_tensors='''pt''' )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 84 | 0 |
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
_a : Any = logging.get_logger(__name__)
class a_ :
    """Abstract base class for hyperparameter-search backends.

    Subclasses declare the backend name / pip package via the class
    attributes and implement availability, search, and default-space hooks.

    NOTE(review): the names in this block look machine-mangled — both class
    attributes are annotated under the same name ``A__`` and all hook methods
    share the name ``lowerCAmelCase`` (later definitions shadow earlier
    ones), while the two concrete methods below read ``self.name`` /
    ``cls.pip_package`` / ``self.is_available`` which are never defined under
    those names.  Restore the original distinct names (``name``,
    ``pip_package``, ``is_available``, ``run``, ``default_hp_space``,
    ``ensure_available``, ``pip_install``) before relying on this class.
    """

    # Backend identifier (e.g. "optuna"); bare annotation, no value bound here.
    A__ : str
    # Pip package name, when it differs from the backend name.
    A__ : str = None
    @staticmethod
    def lowerCAmelCase( ):
        """Return whether the backend's package is importable (abstract)."""
        raise NotImplementedError
    def lowerCAmelCase( self : str , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : str , **UpperCAmelCase__ : List[Any] ):
        """Run the hyperparameter search (abstract).

        NOTE(review): the repeated parameter name ``UpperCAmelCase__`` in this
        signature is a SyntaxError as written — restore distinct names.
        """
        raise NotImplementedError
    def lowerCAmelCase( self : Any , UpperCAmelCase__ : Any ):
        """Return the default hyperparameter space for the backend (abstract)."""
        raise NotImplementedError
    def lowerCAmelCase( self : Any ):
        """Raise if this backend was selected but its package is not installed."""
        if not self.is_available():
            raise RuntimeError(
                F"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}." )
    @classmethod
    def lowerCAmelCase( cls : Optional[Any] ):
        """Return the pip command that installs this backend's package."""
        return F"`pip install {cls.pip_package or cls.name}`"
class a_ ( a ):
    """Optuna hyperparameter-search backend.

    NOTE(review): all three methods share the mangled name ``lowerCAmelCase``
    (only the last definition survives on the class) and the search method
    repeats the parameter name ``UpperCAmelCase__``, a SyntaxError as
    written — restore distinct names.
    """

    # Backend name as registered in HPSearchBackend.
    A__ : Optional[int] = 'optuna'
    @staticmethod
    def lowerCAmelCase( ):
        """Return True when the `optuna` package is importable."""
        return is_optuna_available()
    def lowerCAmelCase( self : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : str , **UpperCAmelCase__ : str ):
        """Delegate the search to `run_hp_search_optuna`."""
        return run_hp_search_optuna(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ )
    def lowerCAmelCase( self : Dict , UpperCAmelCase__ : List[Any] ):
        """Return the default Optuna search space."""
        return default_hp_space_optuna(UpperCAmelCase__ )
class a_ ( a ):
    """Ray Tune hyperparameter-search backend.

    NOTE(review): the three methods share the mangled name ``lowerCAmelCase``
    (later defs shadow earlier ones), the two class attributes share the name
    ``A__``, and the search method repeats the parameter name
    ``UpperCAmelCase__`` (SyntaxError as written) — restore distinct names.
    """

    # Backend name as registered in HPSearchBackend.
    A__ : Union[str, Any] = 'ray'
    # Pip package spec (quoted because of the extras bracket).
    A__ : Any = '\'ray[tune]\''
    @staticmethod
    def lowerCAmelCase( ):
        """Return True when `ray` (with tune) is importable."""
        return is_ray_available()
    def lowerCAmelCase( self : Tuple , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : str , **UpperCAmelCase__ : Tuple ):
        """Delegate the search to `run_hp_search_ray`."""
        return run_hp_search_ray(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ )
    def lowerCAmelCase( self : Tuple , UpperCAmelCase__ : Optional[int] ):
        """Return the default Ray Tune search space."""
        return default_hp_space_ray(UpperCAmelCase__ )
class a_ ( a ):
    """SigOpt hyperparameter-search backend.

    NOTE(review): the three methods share the mangled name ``lowerCAmelCase``
    (only the last survives) and the search method repeats the parameter
    name ``UpperCAmelCase__`` (SyntaxError as written) — restore distinct
    names.
    """

    # Backend name as registered in HPSearchBackend.
    A__ : Any = 'sigopt'
    @staticmethod
    def lowerCAmelCase( ):
        """Return True when the `sigopt` package is importable."""
        return is_sigopt_available()
    def lowerCAmelCase( self : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : str , **UpperCAmelCase__ : Optional[Any] ):
        """Delegate the search to `run_hp_search_sigopt`."""
        return run_hp_search_sigopt(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ )
    def lowerCAmelCase( self : Dict , UpperCAmelCase__ : Dict ):
        """Return the default SigOpt search space."""
        return default_hp_space_sigopt(UpperCAmelCase__ )
class a_ ( a ):
    """Weights & Biases (wandb) hyperparameter-search backend.

    NOTE(review): the three methods share the mangled name ``lowerCAmelCase``
    (only the last survives) and the search method repeats the parameter
    name ``UpperCAmelCase__`` (SyntaxError as written) — restore distinct
    names.
    """

    # Backend name as registered in HPSearchBackend.
    A__ : List[Any] = 'wandb'
    @staticmethod
    def lowerCAmelCase( ):
        """Return True when the `wandb` package is importable."""
        return is_wandb_available()
    def lowerCAmelCase( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : str , **UpperCAmelCase__ : Any ):
        """Delegate the search to `run_hp_search_wandb`."""
        return run_hp_search_wandb(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ )
    def lowerCAmelCase( self : Tuple , UpperCAmelCase__ : List[Any] ):
        """Return the default wandb sweep configuration."""
        return default_hp_space_wandb(UpperCAmelCase__ )
# Registry mapping each HPSearchBackend enum member to its backend class.
# NOTE(review): the dict is bound to the mangled name `_a`, but the default
# backend selector below reads `ALL_HYPERPARAMETER_SEARCH_BACKENDS`, and the
# class names listed here (OptunaBackend, ...) do not match the mangled class
# definitions above (all named `a_`) — confirm the intended module-level names.
_a : Optional[int] = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def a_ ( ) -> str:
    """Return the name of the default hyperparameter-search backend.

    Picks the first available backend from the registry, logs an info
    message when several are available, and raises with install
    instructions when none is installed.

    NOTE(review): the registry is referenced as
    ``ALL_HYPERPARAMETER_SEARCH_BACKENDS`` although the dict defined above is
    bound to the mangled name ``_a`` — confirm the intended module-level name.

    Returns:
        The ``name`` of the first available backend.

    Raises:
        RuntimeError: if no supported backend package is installed.
    """
    # Bind the list to the name actually read below; the original assigned it
    # to a throwaway local and then read the undefined names `__magic_name__`
    # and `available_backends`, raising NameError on every call.
    available_backends = [
        backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()
    ]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                F"{len(available_backends)} hyperparameter search backends available. Using {name} as the default." )
        return name
    raise RuntimeError(
        '''No hyperparameter search backend available.\n'''
        + '''\n'''.join(
            F" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
| 719 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_a : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
_a : Dict = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... 
prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def a_ ( height , width , scale_factor=8 ):
    """Round image dimensions up to sizes compatible with the latent grid.

    The latent grid is ``scale_factor**2`` times smaller per side; each
    dimension is divided by that factor, rounded *up* so no requested pixels
    are lost, then multiplied back by ``scale_factor``.

    Args:
        height: requested image height in pixels.
        width: requested image width in pixels.
        scale_factor: per-side downscale factor of the decoder (default 8).

    Returns:
        Tuple ``(new_height, new_width)``.
    """
    # NOTE: the original signature repeated one mangled parameter name three
    # times (a SyntaxError) and assigned each quotient to a throwaway local
    # while incrementing the undefined names `new_height`/`new_width`;
    # restored here to the names the body actually reads.
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        # Round up when the dimension is not an exact multiple.
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class a_ ( a ):
    """Kandinsky 2.2 ControlNet decoder pipeline.

    Turns CLIP image embeddings plus a spatial control ``hint`` (e.g. a depth
    map) into images via a conditional UNet, a DDPM scheduler and a MoVQ
    decoder.

    NOTE(review): this block appears machine-mangled and is not importable as
    written — most method signatures repeat the parameter name
    ``UpperCAmelCase__`` (duplicate argument => SyntaxError), several tuple
    unpackings carry an annotation (``a , b : T = ...`` => SyntaxError), and
    many values are bound to the local ``snake_case`` but read back under
    different names.  Distinct names must be restored before use; the
    comments below describe the intended behaviour.
    """
    def __init__( self : Optional[int] , UpperCAmelCase__ : UNetaDConditionModel , UpperCAmelCase__ : DDPMScheduler , UpperCAmelCase__ : VQModel , ):
        """Register the UNet, scheduler and MoVQ decoder with the pipeline."""
        super().__init__()
        self.register_modules(
            unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ , movq=UpperCAmelCase__ , )
        # Per-side spatial downscale factor implied by the MoVQ encoder depth.
        # NOTE(review): bound to a local, so `self.movq_scale_factor` read in
        # __call__ is never set — presumably an attribute assignment originally.
        snake_case : List[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def lowerCAmelCase( self : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any ):
        """Draw fresh initial latents, or validate and move caller-supplied
        ones, then scale by the scheduler's initial noise sigma."""
        if latents is None:
            snake_case : int = randn_tensor(UpperCAmelCase__ , generator=UpperCAmelCase__ , device=UpperCAmelCase__ , dtype=UpperCAmelCase__ )
        else:
            if latents.shape != shape:
                raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}" )
            snake_case : Optional[Any] = latents.to(UpperCAmelCase__ )
        snake_case : List[Any] = latents * scheduler.init_noise_sigma
        return latents
    def lowerCAmelCase( self : Dict , UpperCAmelCase__ : Optional[int]=0 ):
        """Offload the UNet and MoVQ to CPU via accelerate's `cpu_offload` so
        weights occupy the given GPU only while executing."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('''Please install accelerate via `pip install accelerate`''' )
        snake_case : Union[str, Any] = torch.device(F"cuda:{gpu_id}" )
        snake_case : Dict = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(UpperCAmelCase__ , UpperCAmelCase__ )
    def lowerCAmelCase( self : List[Any] , UpperCAmelCase__ : Any=0 ):
        """Hook-based model CPU offload (requires accelerate >= 0.17.0): each
        submodule moves to GPU when called and back to CPU afterwards."""
        if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
        snake_case : Optional[int] = torch.device(F"cuda:{gpu_id}" )
        if self.device.type != "cpu":
            self.to('''cpu''' , silence_dtype_warnings=UpperCAmelCase__ )
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        snake_case : List[str] = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            # NOTE(review): annotated tuple target below is a SyntaxError as written.
            snake_case , snake_case : Optional[int] = cpu_offload_with_hook(UpperCAmelCase__ , UpperCAmelCase__ , prev_module_hook=UpperCAmelCase__ )
        # We'll offload the last model manually.
        snake_case : Tuple = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def lowerCAmelCase( self : Union[str, Any] ):
        """Return the device the UNet actually executes on, honouring any
        accelerate offload hooks; falls back to the pipeline device."""
        if not hasattr(self.unet , '''_hf_hook''' ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(UpperCAmelCase__ , '''_hf_hook''' )
                and hasattr(module._hf_hook , '''execution_device''' )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
    @torch.no_grad()
    @replace_example_docstring(UpperCAmelCase__ )
    def __call__( self : List[str] , UpperCAmelCase__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCAmelCase__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCAmelCase__ : torch.FloatTensor , UpperCAmelCase__ : int = 512 , UpperCAmelCase__ : int = 512 , UpperCAmelCase__ : int = 100 , UpperCAmelCase__ : float = 4.0 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase__ : Optional[torch.FloatTensor] = None , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , ):
        """Run generation: batch the embeddings and hint (duplicated for
        classifier-free guidance), sample initial latents, iterate the
        denoising loop, decode with MoVQ and post-process to `output_type`."""
        snake_case : Optional[int] = self._execution_device
        snake_case : Union[str, Any] = guidance_scale > 1.0
        # Lists of embeddings/hints are concatenated into single batched tensors.
        if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
            snake_case : Any = torch.cat(UpperCAmelCase__ , dim=0 )
        if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
            snake_case : Union[str, Any] = torch.cat(UpperCAmelCase__ , dim=0 )
        if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
            snake_case : int = torch.cat(UpperCAmelCase__ , dim=0 )
        snake_case : List[Any] = image_embeds.shape[0] * num_images_per_prompt
        if do_classifier_free_guidance:
            snake_case : Dict = image_embeds.repeat_interleave(UpperCAmelCase__ , dim=0 )
            snake_case : Optional[Any] = negative_image_embeds.repeat_interleave(UpperCAmelCase__ , dim=0 )
            snake_case : Tuple = hint.repeat_interleave(UpperCAmelCase__ , dim=0 )
            # Negative and positive batches are stacked so one UNet pass serves both.
            snake_case : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCAmelCase__ )
            snake_case : List[str] = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCAmelCase__ )
        self.scheduler.set_timesteps(UpperCAmelCase__ , device=UpperCAmelCase__ )
        snake_case : str = self.scheduler.timesteps
        snake_case : Optional[Any] = self.movq.config.latent_channels
        # NOTE(review): annotated tuple target below is a SyntaxError as written.
        snake_case , snake_case : Optional[Any] = downscale_height_and_width(UpperCAmelCase__ , UpperCAmelCase__ , self.movq_scale_factor )
        # create initial latent
        snake_case : Dict = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , self.scheduler , )
        for i, t in enumerate(self.progress_bar(UpperCAmelCase__ ) ):
            # expand the latents if we are doing classifier free guidance
            snake_case : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            snake_case : Optional[int] = {'''image_embeds''': image_embeds, '''hint''': hint}
            snake_case : Any = self.unet(
                sample=UpperCAmelCase__ , timestep=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , added_cond_kwargs=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , )[0]
            if do_classifier_free_guidance:
                # Split off the predicted variance, apply guidance to the noise
                # part only, then re-attach the text-conditioned variance.
                # NOTE(review): annotated tuple targets below are SyntaxErrors as written.
                snake_case , snake_case : Dict = noise_pred.split(latents.shape[1] , dim=1 )
                snake_case , snake_case : Any = noise_pred.chunk(2 )
                snake_case , snake_case : Dict = variance_pred.chunk(2 )
                snake_case : str = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                snake_case : List[str] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , '''variance_type''' )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                snake_case , snake_case : Tuple = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            snake_case : List[Any] = self.scheduler.step(
                UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , generator=UpperCAmelCase__ , )[0]
        # post-processing
        snake_case : List[Any] = self.movq.decode(UpperCAmelCase__ , force_not_quantize=UpperCAmelCase__ )['''sample''']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
        if output_type in ["np", "pil"]:
            # Map [-1, 1] model output into [0, 1] before conversion.
            snake_case : Optional[Any] = image * 0.5 + 0.5
            snake_case : int = image.clamp(0 , 1 )
            snake_case : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
            if output_type == "pil":
                snake_case : str = self.numpy_to_pil(UpperCAmelCase__ )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=UpperCAmelCase__ )
| 84 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_a : List[str] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-classification/requirements.txt')
_a : Union[str, Any] = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
_a : Any = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def a_ ( __magic_name__ ) -> "Image.Image":
    """Load the image file at path ``__magic_name__`` and return it as RGB.

    Opening the image via the file handle keeps decoding inside the ``with``
    block, so the handle is reliably closed once `convert` has read the data.

    Args:
        __magic_name__: filesystem path of the image to load.

    Returns:
        The decoded image converted to RGB mode.
    """
    with open(__magic_name__ , '''rb''' ) as f:
        # The original bound the opened image to a throwaway local and then
        # returned the undefined name `im` (NameError), and re-opened the
        # path instead of using the handle; both fixed here.
        im = Image.open(f )
        return im.convert('''RGB''' )
@dataclass
class a_ :
    """Arguments controlling which data the example trains/evaluates on.

    NOTE(review): every field below is declared under the same mangled name
    ``A__``, so only the last declaration survives as a dataclass field, the
    default sentinel ``a`` is undefined in this chunk, and the validator
    reads ``self.dataset_name`` / ``self.train_dir`` / ``self.validation_dir``
    which are never defined.  Restore the original field names
    (dataset_name, dataset_config_name, train_dir, validation_dir,
    train_val_split, max_train_samples, max_eval_samples).
    """

    # Hub dataset name (possibly a private dataset).
    A__ : Optional[str] = field(
        default=a , metadata={
            'help': 'Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'
        } , )
    # Dataset configuration name passed to `datasets.load_dataset`.
    A__ : Optional[str] = field(
        default=a , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
    # Local folders used when no hub dataset name is given.
    A__ : Optional[str] = field(default=a , metadata={'help': 'A folder containing the training data.'} )
    A__ : Optional[str] = field(default=a , metadata={'help': 'A folder containing the validation data.'} )
    # Fraction of the train split reserved for validation when none exists.
    A__ : Optional[float] = field(
        default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
    # Optional truncation of the train/eval sets for quick debugging runs.
    A__ : Optional[int] = field(
        default=a , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        } , )
    A__ : Optional[int] = field(
        default=a , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        } , )
    def lowerCAmelCase( self : Union[str, Any] ):
        """Validate that either a hub dataset name or a local directory is set."""
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                '''You must specify either a dataset name from the hub or a train and/or validation directory.''' )
@dataclass
class a_ :
    """Arguments selecting which model/config/image processor to fine-tune.

    NOTE(review): as with the data arguments, all fields share the mangled
    name ``A__`` (only the last survives as a dataclass field) and the
    default sentinel ``a`` is undefined in this chunk.  Restore the original
    names (model_name_or_path, model_type, config_name, cache_dir,
    model_revision, image_processor_name, use_auth_token,
    ignore_mismatched_sizes).
    """

    # Hub model id or local path of the checkpoint to fine-tune.
    A__ : str = field(
        default='google/vit-base-patch16-224-in21k' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
    # Model type when training from scratch.
    A__ : Optional[str] = field(
        default=a , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(a )} , )
    # Config name/path when it differs from the model name.
    A__ : Optional[str] = field(
        default=a , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    # Local cache directory for downloaded weights.
    A__ : Optional[str] = field(
        default=a , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
    # Model revision (branch, tag or commit id) to load.
    A__ : str = field(
        default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
    # Image processor name/path override.
    A__ : str = field(default=a , metadata={'help': 'Name or path of preprocessor config.'} )
    # Whether to use the stored auth token for private models.
    A__ : bool = field(
        default=a , metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        } , )
    # Allow loading checkpoints whose classification head size differs.
    A__ : bool = field(
        default=a , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def a_ ( __magic_name__ ) -> dict:
    """Collate a batch of image-classification examples into model inputs.

    Args:
        __magic_name__: iterable of examples, each a dict with a
            ``pixel_values`` tensor of shape (C, H, W) and an integer
            ``labels`` entry.

    Returns:
        Dict with a stacked ``pixel_values`` tensor of shape (B, C, H, W)
        and a 1-D ``labels`` tensor of length B.
    """
    # The original body iterated the undefined name `examples` instead of the
    # function's parameter, raising NameError on every call; fixed here.
    pixel_values = torch.stack([example['''pixel_values'''] for example in __magic_name__] )
    labels = torch.tensor([example['''labels'''] for example in __magic_name__] )
    return {"pixel_values": pixel_values, "labels": labels}
def a_ ( ) -> Dict:
    """Entry point for the image-classification fine-tuning example.

    Parses CLI/JSON arguments, configures logging, detects a resumable
    checkpoint, loads and optionally splits the dataset, builds the label
    mappings, instantiates config/model/image processor, wires up the
    torchvision train/eval transforms, then trains and/or evaluates with
    ``Trainer`` and finally writes a model card (or pushes to the Hub).

    NOTE(review): throughout this function, multi-valued results are bound
    to the single mangled local ``snake_case`` while later code reads the
    original names (``parser``, ``model_args``, ``data_args``,
    ``training_args``, ``dataset``, ``labels``, ``metric`` ...), and the
    undefined name ``__magic_name__`` is passed in several calls — the
    function raises NameError as written; the original bindings must be
    restored.
    """
    snake_case : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        snake_case : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        snake_case : Tuple = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('''run_image_classification''' , __magic_name__ , __magic_name__ )
    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    snake_case : Union[str, Any] = training_args.get_process_log_level()
    logger.setLevel(__magic_name__ )
    transformers.utils.logging.set_verbosity(__magic_name__ )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
    logger.info(F"Training/evaluation parameters {training_args}" )
    # Detecting last checkpoint.
    snake_case : Optional[int] = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        snake_case : Dict = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                F"Output directory ({training_args.output_dir}) already exists and is not empty. "
                '''Use --overwrite_output_dir to overcome.''' )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
    # Set seed before initializing model.
    set_seed(training_args.seed )
    # Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        snake_case : Tuple = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task='''image-classification''' , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        snake_case : Optional[Any] = {}
        if data_args.train_dir is not None:
            snake_case : str = os.path.join(data_args.train_dir , '''**''' )
        if data_args.validation_dir is not None:
            snake_case : Dict = os.path.join(data_args.validation_dir , '''**''' )
        snake_case : Tuple = load_dataset(
            '''imagefolder''' , data_files=__magic_name__ , cache_dir=model_args.cache_dir , task='''image-classification''' , )
    # If we don't have a validation split, split off a percentage of train as validation.
    snake_case : List[Any] = None if '''validation''' in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , __magic_name__ ) and data_args.train_val_split > 0.0:
        snake_case : List[str] = dataset['''train'''].train_test_split(data_args.train_val_split )
        snake_case : List[Any] = split['''train''']
        snake_case : List[str] = split['''test''']
    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    snake_case : List[str] = dataset['''train'''].features['''labels'''].names
    snake_case : Dict = {}, {}
    for i, label in enumerate(__magic_name__ ):
        snake_case : Dict = str(__magic_name__ )
        snake_case : Tuple = label
    # Load the accuracy metric from the datasets package
    snake_case : Optional[int] = evaluate.load('''accuracy''' )
    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(__magic_name__ ):
        return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
    snake_case : Any = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path , num_labels=len(__magic_name__ ) , labelaid=__magic_name__ , idalabel=__magic_name__ , finetuning_task='''image-classification''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    snake_case : Tuple = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__magic_name__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
    snake_case : str = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # Define torchvision transforms to be applied to each image.
    if "shortest_edge" in image_processor.size:
        snake_case : Any = image_processor.size['''shortest_edge''']
    else:
        snake_case : Tuple = (image_processor.size['''height'''], image_processor.size['''width'''])
    snake_case : Union[str, Any] = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
    snake_case : List[Any] = Compose(
        [
            RandomResizedCrop(__magic_name__ ),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ] )
    snake_case : Optional[Any] = Compose(
        [
            Resize(__magic_name__ ),
            CenterCrop(__magic_name__ ),
            ToTensor(),
            normalize,
        ] )
    def train_transforms(__magic_name__ ):
        # Apply the (augmenting) training transform to every image in the batch.
        snake_case : Optional[int] = [
            _train_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image''']
        ]
        return example_batch
    def val_transforms(__magic_name__ ):
        # Apply the deterministic eval transform to every image in the batch.
        snake_case : Union[str, Any] = [_val_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image''']]
        return example_batch
    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError('''--do_train requires a train dataset''' )
        if data_args.max_train_samples is not None:
            snake_case : Optional[Any] = (
                dataset['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
            )
        # Set the training transforms
        dataset["train"].set_transform(__magic_name__ )
    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError('''--do_eval requires a validation dataset''' )
        if data_args.max_eval_samples is not None:
            snake_case : Dict = (
                dataset['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        dataset["validation"].set_transform(__magic_name__ )
    # Initalize our trainer
    snake_case : Dict = Trainer(
        model=__magic_name__ , args=__magic_name__ , train_dataset=dataset['''train'''] if training_args.do_train else None , eval_dataset=dataset['''validation'''] if training_args.do_eval else None , compute_metrics=__magic_name__ , tokenizer=__magic_name__ , data_collator=__magic_name__ , )
    # Training
    if training_args.do_train:
        snake_case : Union[str, Any] = None
        if training_args.resume_from_checkpoint is not None:
            snake_case : Tuple = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            snake_case : List[Any] = last_checkpoint
        snake_case : Dict = trainer.train(resume_from_checkpoint=__magic_name__ )
        trainer.save_model()
        trainer.log_metrics('''train''' , train_result.metrics )
        trainer.save_metrics('''train''' , train_result.metrics )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        snake_case : List[Any] = trainer.evaluate()
        trainer.log_metrics('''eval''' , __magic_name__ )
        trainer.save_metrics('''eval''' , __magic_name__ )
    # Write model card and (optionally) push to hub
    snake_case : List[str] = {
        '''finetuned_from''': model_args.model_name_or_path,
        '''tasks''': '''image-classification''',
        '''dataset''': data_args.dataset_name,
        '''tags''': ['''image-classification''', '''vision'''],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**__magic_name__ )
    else:
        trainer.create_model_card(**__magic_name__ )
if __name__ == "__main__":
    # The training entry point defined above is the (mangled) function `a_`;
    # the name `main` is undefined in this module, so the original call
    # raised NameError as soon as the script was executed.
    a_()
| 720 |
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_a : List[Any] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class a_ ( a , unittest.TestCase ):
A__ : Dict = ReformerTokenizer
A__ : Optional[int] = ReformerTokenizerFast
A__ : str = True
A__ : Tuple = False
A__ : str = True
def lowerCAmelCase( self : List[Any] ):
"""simple docstring"""
super().setUp()
snake_case : str = ReformerTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase( self : Any ):
"""simple docstring"""
snake_case : int = '''<s>'''
snake_case : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ )
def lowerCAmelCase( self : Optional[Any] ):
"""simple docstring"""
snake_case : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(UpperCAmelCase__ ) , 1_000 )
def lowerCAmelCase( self : List[Any] ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
def lowerCAmelCase( self : Dict ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
snake_case : Any = self.get_tokenizer()
snake_case : str = self.get_rust_tokenizer()
snake_case : Tuple = '''I was born in 92000, and this is falsé.'''
snake_case : str = tokenizer.tokenize(UpperCAmelCase__ )
snake_case : int = rust_tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
snake_case : Union[str, Any] = tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
snake_case : List[str] = rust_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
snake_case : List[str] = self.get_rust_tokenizer()
snake_case : Optional[int] = tokenizer.encode(UpperCAmelCase__ )
snake_case : Optional[Any] = rust_tokenizer.encode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCAmelCase( self : Dict , UpperCAmelCase__ : List[Any]=15 ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case : str = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
# Simple input
snake_case : Union[str, Any] = '''This is a simple input'''
snake_case : List[str] = ['''This is a simple input 1''', '''This is a simple input 2''']
snake_case : int = ('''This is a simple input''', '''This is a pair''')
snake_case : int = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Simple input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Simple input
self.assertRaises(
UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' , )
# Pair input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Pair input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Pair input
self.assertRaises(
UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' , )
def lowerCAmelCase( self : str ):
"""simple docstring"""
pass
def lowerCAmelCase( self : Union[str, Any] ):
"""simple docstring"""
snake_case : Union[str, Any] = ReformerTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ )
snake_case : List[str] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [285, 46, 10, 170, 382] , )
snake_case : int = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
UpperCAmelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
snake_case : int = tokenizer.convert_tokens_to_ids(UpperCAmelCase__ )
self.assertListEqual(
UpperCAmelCase__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
snake_case : List[Any] = tokenizer.convert_ids_to_tokens(UpperCAmelCase__ )
self.assertListEqual(
UpperCAmelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def lowerCAmelCase( self : Tuple ):
"""simple docstring"""
return ReformerTokenizer.from_pretrained('''google/reformer-crime-and-punishment''' )
@slow
def lowerCAmelCase( self : List[str] ):
"""simple docstring"""
snake_case : Any = '''Hello World!'''
snake_case : Optional[Any] = [126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) )
@slow
def lowerCAmelCase( self : Optional[Any] ):
"""simple docstring"""
snake_case : Optional[Any] = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
snake_case : Dict = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) )
@require_torch
@slow
def lowerCAmelCase( self : List[Any] ):
"""simple docstring"""
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
snake_case : Any = list(self.big_tokenizer.get_vocab().keys() )[:10]
snake_case : Union[str, Any] = ''' '''.join(UpperCAmelCase__ )
snake_case : Optional[int] = self.big_tokenizer.encode_plus(UpperCAmelCase__ , return_tensors='''pt''' )
snake_case : List[str] = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors='''pt''' )
snake_case : Optional[Any] = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
snake_case : Tuple = encoded_sequence['''input_ids'''].shape
snake_case : List[Any] = ReformerModel(UpperCAmelCase__ )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**UpperCAmelCase__ )
model(**UpperCAmelCase__ )
@slow
def lowerCAmelCase( self : Optional[int] ):
"""simple docstring"""
# fmt: off
snake_case : Tuple = {'''input_ids''': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
snake_case : Tuple = [
'''This is a very simple sentence.''',
'''The quick brown fox jumps over the lazy dog.''',
]
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase__ , model_name='''google/reformer-crime-and-punishment''' , revision='''0e6c3decb8211d49bf881013425dc8b0448b3f5a''' , padding=UpperCAmelCase__ , sequences=UpperCAmelCase__ , )
| 84 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class a_ ( unittest.TestCase ):
    """Unit tests for generation's ``DisjunctiveConstraint``.

    Fixes over the mangled version: the ``(stepped, completed, reset)`` tuple
    returned by ``dc.update`` is unpacked again, ``assertRaises``/``isinstance``
    arguments are restored, and the four methods get distinct names (they all
    shared one mangled name, so only the last one ever ran).
    """

    def test_input_types( self ):
        """Constraint accepts nested lists of ints and rejects tensors."""
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset )
        self.assertTrue(isinstance(dc.token_ids , list ) )
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )

    def test_check_illegal_input( self ):
        """One branch being a strict prefix of another is rejected."""
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint(cset )  # fails here

    def test_example_progression( self ):
        """Stepping 1, 2, 3 completes the [1, 2, 3] branch."""
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset )

        stepped, completed, reset = dc.update(1 )
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )

        stepped, completed, reset = dc.update(2 )
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )

        stepped, completed, reset = dc.update(3 )
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3] )

    def test_example_progression_unequal_three_mid_and_reset( self ):
        """Branches of unequal length; reset() restarts progress tracking."""
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset )

        stepped, completed, reset = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )

        stepped, completed, reset = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )

        stepped, completed, reset = dc.update(4 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2, 4] )

        stepped, completed, reset = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5] )

        dc.reset()

        stepped, completed, reset = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 3 )
        self.assertTrue(dc.current_seq == [1] )

        stepped, completed, reset = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 2 )
        self.assertTrue(dc.current_seq == [1, 2] )

        stepped, completed, reset = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.remaining() == 0 )
        self.assertTrue(dc.current_seq == [1, 2, 5] )
| 721 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image ):
    """Convert a PIL image to a [-1, 1] float tensor, snapping sides to multiples of 32.

    Renamed from the mangled ``a_`` so the pipeline's ``preprocess(...)`` call
    site resolves. Also fixes the destroyed ``w, h`` unpacking and the mangled
    ``np.floataa`` dtype (``np.float32``).
    """
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] )
    image = np.array(image ).astype(np.float32 ) / 255.0
    image = image[None].transpose(0 , 3 , 1 , 2 )  # HWC -> NCHW
    image = torch.from_numpy(image )
    return 2.0 * image - 1.0
class a_ ( a ):
    """Latent-diffusion super-resolution pipeline (VQ-VAE + UNet + scheduler).

    Fixes over the mangled version: ``__init__`` and ``__call__`` had illegal
    duplicate parameter names (a SyntaxError), and all intermediate assignment
    targets (``height``, ``width``, ``latents``, ``eta`` handling, ...) were
    collapsed to throwaway names, leaving the referenced names undefined.
    """

    def __init__( self , vqvae : VQModel , unet : UNetaDModel , scheduler : Union[
        DDIMScheduler,
        PNDMScheduler,
        LMSDiscreteScheduler,
        EulerDiscreteScheduler,
        EulerAncestralDiscreteScheduler,
        DPMSolverMultistepScheduler,
    ] , ):
        """Register the VQ-VAE, UNet and noise scheduler on the pipeline."""
        super().__init__()
        self.register_modules(vqvae=vqvae , unet=unet , scheduler=scheduler )

    @torch.no_grad()
    def __call__( self , image : Union[torch.Tensor, PIL.Image.Image] = None , batch_size : Optional[int] = 1 , num_inference_steps : Optional[int] = 100 , eta : Optional[float] = 0.0 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , ):
        """Run super-resolution on ``image``.

        Args:
            image: low-resolution input (PIL image or NCHW tensor in [-1, 1]).
            batch_size: ignored for tensor input (inferred from dim 0).
            num_inference_steps: number of denoising steps.
            eta: DDIM eta; ignored by schedulers whose ``step`` lacks it.
            generator: RNG(s) for the initial latents.
            output_type: "pil" to return PIL images, anything else for numpy.
            return_dict: return an ``ImagePipelineOutput`` instead of a tuple.
        """
        if isinstance(image , PIL.Image.Image ):
            batch_size = 1
        elif isinstance(image , torch.Tensor ):
            batch_size = image.shape[0]
        else:
            raise ValueError(F"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image )}" )

        if isinstance(image , PIL.Image.Image ):
            image = preprocess(image )

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters() ).dtype
        latents = randn_tensor(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
        image = image.to(device=self.device , dtype=latents_dtype )

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs['''eta'''] = eta

        for t in self.progress_bar(timesteps_tensor ):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image] , dim=1 )
            latents_input = self.scheduler.scale_model_input(latents_input , t )
            # predict the noise residual
            noise_pred = self.unet(latents_input , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_kwargs ).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents ).sample
        image = torch.clamp(image , -1.0 , 1.0 )
        image = image / 2 + 0.5  # [-1, 1] -> [0, 1]
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image )
| 84 | 0 |
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all primes strictly below ``max_number`` (sieve of Eratosthenes)."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            # Bug fix: the inner range must step by ``i`` (mark every multiple
            # of i), not by ``max_number`` as in the mangled version.
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800_800, degree: int = 800_800) -> int:
    """Count hybrid integers p^q * q^p (p < q primes) not exceeding base**degree.

    Project Euler problem 800. Works in log2 space:
    p^q * q^p <= base**degree  iff  q*log2(p) + p*log2(q) <= degree*log2(base).

    Fixes over the mangled version: ``loga`` -> ``log2`` (no ``math.loga``
    exists), restored the function names the call sites use
    (``calculate_prime_numbers`` below, ``solution`` in ``__main__``), and
    gave the two parameters distinct legal names.
    """
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    # Two-pointer sweep: for each smaller prime, shrink the largest partner
    # still satisfying the bound; every prime strictly between forms a pair.
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count


if __name__ == "__main__":
    print(f"{solution() = }")
| 700 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class a_ ( a ):
    """Tests for ``RealmRetriever`` backed by a tiny on-disk tokenizer and block records.

    Fixes over the mangled version: all ten methods shared one name (so
    setUp/tearDown never ran and tests shadowed each other) -- the restored
    names are grounded by the in-class call sites (``self.get_tokenizer()``,
    ``self.get_config()``, ...); tuple unpacking of the retriever output and
    several mangled keyword values (``dtype=object``, ``exist_ok=True``,
    ``add_special_tokens=False``) are restored.
    """

    def setUp( self ):
        """Create a temp dir holding a toy WordPiece vocab and a block-records dir."""
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5
        # Realm tok
        vocab_tokens = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''test''',
            '''question''',
            '''this''',
            '''is''',
            '''the''',
            '''first''',
            '''second''',
            '''third''',
            '''fourth''',
            '''fifth''',
            '''record''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        tokenizer_path = os.path.join(self.tmpdirname , '''realm_tokenizer''' )
        os.makedirs(tokenizer_path , exist_ok=True )
        self.vocab_file = os.path.join(tokenizer_path , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''.join([x + '''\n''' for x in vocab_tokens] ) )
        block_records_path = os.path.join(self.tmpdirname , '''realm_block_records''' )
        os.makedirs(block_records_path , exist_ok=True )

    def get_tokenizer( self ):
        """Load the toy tokenizer written by setUp."""
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer''' ) )

    def tearDown( self ):
        """Remove the temp dir created in setUp."""
        shutil.rmtree(self.tmpdirname )

    def get_config( self ):
        """Config sized to the dummy block records."""
        config = RealmConfig(num_block_records=self.num_block_records )
        return config

    def get_dummy_dataset( self ):
        """Minimal QA-style dataset fixture."""
        dataset = Dataset.from_dict(
            {
                '''id''': ['''0''', '''1'''],
                '''question''': ['''foo''', '''bar'''],
                '''answers''': [['''Foo''', '''Bar'''], ['''Bar''']],
            } )
        return dataset

    def get_dummy_block_records( self ):
        """Toy evidence blocks; dtype=object keeps them as raw bytes."""
        block_records = np.array(
            [
                b'''This is the first record''',
                b'''This is the second record''',
                b'''This is the third record''',
                b'''This is the fourth record''',
                b'''This is the fifth record''',
                b'''This is a longer longer longer record''',
            ] , dtype=object , )
        return block_records

    def get_dummy_retriever( self ):
        """Retriever over the dummy block records and toy tokenizer."""
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
        return retriever

    def test_retrieve( self ):
        """Retrieving two blocks concatenates question + block text correctly."""
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer
        retrieved_block_ids = np.array([0, 3] , dtype='''long''' )
        question_input_ids = tokenizer(['''Test question'''] ).input_ids
        answer_ids = tokenizer(
            ['''the fourth'''] , add_special_tokens=False , return_token_type_ids=False , return_attention_mask=False , ).input_ids
        max_length = config.reader_seq_len
        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids , question_input_ids , answer_ids=answer_ids , max_length=max_length , return_tensors='''np''' )
        self.assertEqual(len(has_answers ) , 2 )
        self.assertEqual(len(start_pos ) , 2 )
        self.assertEqual(len(end_pos ) , 2 )
        self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
        self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
        self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
        self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , )

    def test_block_has_answer( self ):
        """has_answers / start_pos / end_pos are computed per retrieved block."""
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer
        retrieved_block_ids = np.array([0, 3, 5] , dtype='''long''' )
        question_input_ids = tokenizer(['''Test question'''] ).input_ids
        answer_ids = tokenizer(
            ['''the fourth''', '''longer longer'''] , add_special_tokens=False , return_token_type_ids=False , return_attention_mask=False , ).input_ids
        max_length = config.reader_seq_len
        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids , question_input_ids , answer_ids=answer_ids , max_length=max_length , return_tensors='''np''' )
        self.assertEqual([False, True, True] , has_answers )
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , start_pos )
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , end_pos )

    def test_save_load_pretrained( self ):
        """Round-trip save_pretrained / from_pretrained, locally and via a mocked hub."""
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
        self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
        # Test mocked remote path
        with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''' ) as mock_hf_hub_download:
            # Bug fix: the mangled code dropped the ``return_value`` assignment,
            # so the mock never pointed at the saved records file.
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname , '''realm_block_records''' ) , _REALM_BLOCK_RECORDS_FILENAME )
            retriever = RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''' )
            self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
| 84 | 0 |
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def convert_tax_checkpoint_to_flax(tax_checkpoint_path, config_name, flax_dump_folder_path):
    """Port a T5X (T5 / LongT5) checkpoint into a Flax `transformers` model.

    Fixes over the mangled version: the three parameters all shared one illegal
    duplicate name (a SyntaxError) and are restored to match the ``__main__``
    call site; ``layer_name`` and the ``tax_*`` source-side targets are restored
    from their in-function uses; the t5 branch uses ``elif`` so a plain T5
    config no longer falls through to the ValueError.

    NOTE(review): the flax-side assignment targets (dict paths into
    ``flax_model.params``) were destroyed by the mangling and are reconstructed
    here from the upstream transformers conversion script -- verify against it.

    Args:
        tax_checkpoint_path: path of the T5X checkpoint to load.
        config_name: model config name/path understood by ``AutoConfig``.
        flax_dump_folder_path: output folder for the converted Flax model.
    """
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeqaSeqLM.from_config(config=config)
    tax_model = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    # Gated-GELU checkpoints split the MLP input projection into wi_0 / wi_1.
    split_mlp_wi = '''wi_0''' in tax_model['''target''']['''encoder''']['''layers_0''']['''mlp''']

    if config.model_type == "t5":
        encoder_attn_name = '''SelfAttention'''
    elif config.model_type == "longt5" and config.encoder_attention_type == "local":
        encoder_attn_name = '''LocalSelfAttention'''
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        encoder_attn_name = '''TransientGlobalSelfAttention'''
    else:
        raise ValueError(
            '''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`'''
            ''' attribute with a value from [\'local\', \'transient-global].''' )

    # Encoder
    for layer_index in range(config.num_layers ):
        layer_name = F"layers_{str(layer_index )}"

        # Self-Attention
        tax_attention_key = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel''']
        tax_attention_out = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel''']
        tax_attention_query = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel''']
        tax_attention_value = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel''']

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            tax_global_layer_norm = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale''']

        # Layer Normalization
        tax_attention_layer_norm = tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale''']

        if split_mlp_wi:
            tax_mlp_wi_0 = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
            tax_mlp_wi_1 = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
        else:
            tax_mlp_wi = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
        tax_mlp_wo = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']

        # Layer Normalization
        tax_mlp_layer_norm = tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']

        # Assigning
        flax_model_encoder_layer_block = flax_model.params['''encoder''']['''block'''][str(layer_index )]['''layer''']
        flax_model_encoder_layer_block['''0'''][encoder_attn_name]['''k''']['''kernel'''] = tax_attention_key
        flax_model_encoder_layer_block['''0'''][encoder_attn_name]['''o''']['''kernel'''] = tax_attention_out
        flax_model_encoder_layer_block['''0'''][encoder_attn_name]['''q''']['''kernel'''] = tax_attention_query
        flax_model_encoder_layer_block['''0'''][encoder_attn_name]['''v''']['''kernel'''] = tax_attention_value
        flax_model_encoder_layer_block['''0''']['''layer_norm''']['''weight'''] = tax_attention_layer_norm

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            flax_model_encoder_layer_block['''0'''][encoder_attn_name]['''global_input_layer_norm''']['''weight'''] = (
                tax_global_layer_norm
            )

        if split_mlp_wi:
            flax_model_encoder_layer_block['''1''']['''DenseReluDense''']['''wi_0''']['''kernel'''] = tax_mlp_wi_0
            flax_model_encoder_layer_block['''1''']['''DenseReluDense''']['''wi_1''']['''kernel'''] = tax_mlp_wi_1
        else:
            flax_model_encoder_layer_block['''1''']['''DenseReluDense''']['''wi''']['''kernel'''] = tax_mlp_wi
        flax_model_encoder_layer_block['''1''']['''DenseReluDense''']['''wo''']['''kernel'''] = tax_mlp_wo

        # Layer Normalization
        flax_model_encoder_layer_block['''1''']['''layer_norm''']['''weight'''] = tax_mlp_layer_norm

        flax_model.params['''encoder''']['''block'''][str(layer_index )]['''layer'''] = flax_model_encoder_layer_block

    # Only for layer 0:
    tax_encoder_rel_embedding = tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T
    flax_model.params['''encoder''']['''block''']['''0''']['''layer''']['''0'''][encoder_attn_name][
        '''relative_attention_bias'''
    ]['''embedding'''] = tax_encoder_rel_embedding

    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        tax_encoder_global_rel_embedding = tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T
        flax_model.params['''encoder''']['''block''']['''0''']['''layer''']['''0'''][encoder_attn_name][
            '''global_relative_attention_bias'''
        ]['''embedding'''] = tax_encoder_global_rel_embedding

    # Assigning
    tax_encoder_norm = tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale''']
    flax_model.params['''encoder''']['''final_layer_norm''']['''weight'''] = tax_encoder_norm

    # Decoder
    for layer_index in range(config.num_layers ):
        layer_name = F"layers_{str(layer_index )}"

        # Self-Attention
        tax_attention_key = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel''']
        tax_attention_out = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel''']
        tax_attention_query = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel''']
        tax_attention_value = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel''']

        # Layer Normalization
        tax_pre_attention_layer_norm = tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][
            '''scale'''
        ]

        # Encoder-Decoder-Attention
        tax_enc_dec_attention_module = tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention''']
        tax_enc_dec_attention_key = tax_enc_dec_attention_module['''key''']['''kernel''']
        tax_enc_dec_attention_out = tax_enc_dec_attention_module['''out''']['''kernel''']
        tax_enc_dec_attention_query = tax_enc_dec_attention_module['''query''']['''kernel''']
        tax_enc_dec_attention_value = tax_enc_dec_attention_module['''value''']['''kernel''']

        # Layer Normalization
        tax_cross_layer_norm = tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale''']

        # MLP
        if split_mlp_wi:
            tax_mlp_wi_0 = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
            tax_mlp_wi_1 = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
        else:
            tax_mlp_wi = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
        tax_mlp_wo = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']

        # Layer Normalization
        tax_mlp_layer_norm = tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']

        # Assigning (reconstructed targets -- see NOTE in the docstring)
        flax_model_decoder_layer_block = flax_model.params['''decoder''']['''block'''][str(layer_index )]['''layer''']
        flax_model_decoder_layer_block['''0''']['''SelfAttention''']['''k''']['''kernel'''] = tax_attention_key
        flax_model_decoder_layer_block['''0''']['''SelfAttention''']['''o''']['''kernel'''] = tax_attention_out
        flax_model_decoder_layer_block['''0''']['''SelfAttention''']['''q''']['''kernel'''] = tax_attention_query
        flax_model_decoder_layer_block['''0''']['''SelfAttention''']['''v''']['''kernel'''] = tax_attention_value
        flax_model_decoder_layer_block['''0''']['''layer_norm''']['''weight'''] = tax_pre_attention_layer_norm

        flax_model_decoder_layer_block['''1''']['''EncDecAttention''']['''k''']['''kernel'''] = tax_enc_dec_attention_key
        flax_model_decoder_layer_block['''1''']['''EncDecAttention''']['''o''']['''kernel'''] = tax_enc_dec_attention_out
        flax_model_decoder_layer_block['''1''']['''EncDecAttention''']['''q''']['''kernel'''] = tax_enc_dec_attention_query
        flax_model_decoder_layer_block['''1''']['''EncDecAttention''']['''v''']['''kernel'''] = tax_enc_dec_attention_value
        flax_model_decoder_layer_block['''1''']['''layer_norm''']['''weight'''] = tax_cross_layer_norm

        if split_mlp_wi:
            flax_model_decoder_layer_block['''2''']['''DenseReluDense''']['''wi_0''']['''kernel'''] = tax_mlp_wi_0
            flax_model_decoder_layer_block['''2''']['''DenseReluDense''']['''wi_1''']['''kernel'''] = tax_mlp_wi_1
        else:
            flax_model_decoder_layer_block['''2''']['''DenseReluDense''']['''wi''']['''kernel'''] = tax_mlp_wi
        flax_model_decoder_layer_block['''2''']['''DenseReluDense''']['''wo''']['''kernel'''] = tax_mlp_wo
        flax_model_decoder_layer_block['''2''']['''layer_norm''']['''weight'''] = tax_mlp_layer_norm

        flax_model.params['''decoder''']['''block'''][str(layer_index )]['''layer'''] = flax_model_decoder_layer_block

    # Decoder Normalization
    tax_decoder_norm = tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale''']
    flax_model.params['''decoder''']['''final_layer_norm''']['''weight'''] = tax_decoder_norm

    # Only for layer 0:
    tax_decoder_rel_embedding = tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T
    flax_model.params['''decoder''']['''block''']['''0''']['''layer''']['''0''']['''SelfAttention'''][
        '''relative_attention_bias'''
    ]['''embedding'''] = tax_decoder_rel_embedding

    # Token Embeddings
    tax_token_embeddings = tax_model['''target''']['''token_embedder''']['''embedding''']
    flax_model.params['''shared''']['''embedding'''] = tax_token_embeddings

    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in tax_model["target"]["decoder"]:
        flax_model.params['''lm_head''']['''kernel'''] = tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel''']

    flax_model.save_pretrained(flax_dump_folder_path )
    print('''T5X Model was sucessfully converted!''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    # Bug fix: the mangled version bound the parser and parsed args to a
    # throwaway name while calling methods/attributes on ``parser``/``args``,
    # and read ``args.tax_checkpoint_path`` although the flag registered
    # below is ``--t5x_checkpoint_path``.
    parser.add_argument(
        '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path the T5X checkpoint.'
    )
    parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
    parser.add_argument(
        '--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
    )
    args = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 701 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

# Mapping consumed by _LazyModule at the bottom of this file. The mangled
# version bound every update to a throwaway name, leaving
# ``_import_structure`` undefined at the _LazyModule call.
_import_structure = {'configuration_encoder_decoder': ['EncoderDecoderConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_encoder_decoder'] = ['EncoderDecoderModel']

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_encoder_decoder'] = ['TFEncoderDecoderModel']

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_flax_encoder_decoder'] = ['FlaxEncoderDecoderModel']

if TYPE_CHECKING:
    from .configuration_encoder_decoder import EncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encoder_decoder import EncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_encoder_decoder import TFEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 84 | 0 |
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class a_ ( unittest.TestCase ):
    """Tests for ``transformers.utils.backbone_utils`` helpers.

    Fixes over the mangled version: the ``(out_features, out_indices)``
    unpacking was destroyed and the assertions compared undefined names;
    the three methods shared one name so only the last ever ran.
    """

    def test_get_aligned_output_features_output_indices( self ):
        stage_names = ['''a''', '''b''', '''c''']

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None , None , stage_names )
        self.assertEqual(out_features , ['''c'''] )
        self.assertEqual(out_indices , [2] )

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(['''a''', '''c'''] , None , stage_names )
        self.assertEqual(out_features , ['''a''', '''c'''] )
        self.assertEqual(out_indices , [0, 2] )

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None , [0, 2] , stage_names )
        self.assertEqual(out_features , ['''a''', '''c'''] )
        self.assertEqual(out_indices , [0, 2] )

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None , [-3, -1] , stage_names )
        self.assertEqual(out_features , ['''a''', '''c'''] )
        self.assertEqual(out_indices , [-3, -1] )

    def test_verify_out_features_out_indices( self ):
        # Stage names must be set
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(['''a''', '''b'''] , (0, 1) , None )
        # Out features must be a list
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(('''a''', '''b''') , (0, 1) , ['''a''', '''b'''] )
        # Out features must be a subset of stage names
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(['''a''', '''b'''] , (0, 1) , ['''a'''] )
        # Out indices must be a list or tuple
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(None , 0 , ['''a''', '''b'''] )
        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(None , (0, 1) , ['''a'''] )
        # Out features and out indices must be the same length
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(['''a''', '''b'''] , (0,) , ['''a''', '''b''', '''c'''] )
        # Out features should match out indices
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(['''a''', '''b'''] , (0, 2) , ['''a''', '''b''', '''c'''] )
        # Out features and out indices should be in order
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(['''b''', '''a'''] , (0, 1) , ['''a''', '''b'''] )
        # Check passes with valid inputs
        verify_out_features_out_indices(['''a''', '''b''', '''d'''] , (0, 1, -1) , ['''a''', '''b''', '''c''', '''d'''] )

    def test_backbone_mixin( self ):
        backbone = BackboneMixin()

        backbone.stage_names = ['''a''', '''b''', '''c''']
        backbone._out_features = ['''a''', '''c''']
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features , ['''a''', '''c'''] )
        self.assertEqual(backbone.out_indices , [0, 2] )

        # Check out features and indices are updated correctly
        backbone.out_features = ['''a''', '''b''']
        self.assertEqual(backbone.out_features , ['''a''', '''b'''] )
        self.assertEqual(backbone.out_indices , [0, 1] )

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features , ['''a''', '''c'''] )
        self.assertEqual(backbone.out_indices , [-3, -1] )
| 702 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

# Constant names restored to what the tokenizer class below references.
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'yjernite/retribert-base-uncased': (
            'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
        ),
    },
    'tokenizer_file': {
        'yjernite/retribert-base-uncased': (
            'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'yjernite/retribert-base-uncased': 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    'yjernite/retribert-base-uncased': {'do_lower_case': True},
}
class a_ (PreTrainedTokenizerFast):
    """Fast RetriBERT tokenizer backed by HuggingFace *tokenizers* (BERT wordpiece)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        """Build the fast tokenizer and keep the backend normalizer in sync with the options."""
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # A pre-serialized tokenizer may carry normalizer settings that disagree
        # with the arguments; rebuild the normalizer if so.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Return ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Segment ids: 0 for the first sequence (and specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the wordpiece vocabulary; returns the written file paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 84 | 0 |
from math import factorial


def solution(num: int = 100) -> int:
    """Return the sum of the decimal digits of num! (Project Euler #20).

    >>> solution(10)
    27
    """
    # map(int, ...) converts each digit character; the original mapped the
    # argument itself over the digits, which raised TypeError.
    return sum(map(int, str(factorial(num))))


if __name__ == "__main__":
    print(solution(int(input('Enter the Number: ').strip())))
| 703 |
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    """Euclid's algorithm: return gcd(a, b).

    Name restored to match the recursive call and the HillCipher caller below;
    the obfuscated form also had two parameters with the same name (SyntaxError).
    """
    return b if a == 0 else greatest_common_divisor(b % a, a)
class HillCipher:
    """Hill cipher over the 36-symbol alphabet A-Z0-9 (matrix cipher mod 36)."""

    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        """encrypt_key: square integer matrix; must be invertible modulo 36."""
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        """Map a symbol to its index in key_string."""
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        """Map a (possibly float) index back to its symbol."""
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        """Raise ValueError unless det(key) is coprime with 36 (key invertible mod 36)."""
        from math import gcd  # local import: self-contained coprimality check

        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if gcd(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        """Uppercase, drop foreign chars, pad with the last char to a multiple of break_key."""
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)

    def encrypt(self, text: str) -> str:
        """Encrypt text block-by-block: ciphertext block = key @ block (mod 36)."""
        text = self.process_text(text.upper())
        encrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch
        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        """Return the inverse of the key matrix modulo 36."""
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        # det * inv(key) is the adjugate; multiplying by det_inv inverts det mod 36.
        inv_key = det_inv * numpy.linalg.det(self.encrypt_key) * numpy.linalg.inv(self.encrypt_key)
        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        """Decrypt text previously produced by encrypt with the same key."""
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch
        return decrypted
def main() -> None:
    """Interactively read a key matrix, then encrypt or decrypt user text."""
    n = int(input('Enter the order of the encryption key: '))
    hill_matrix = []

    print('Enter each row of the encryption key with space separated integers')
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print('Would you like to encrypt or decrypt some text? (1 or 2)')
    option = input('\n1. Encrypt\n2. Decrypt\n')
    if option == "1":
        text_e = input('What text would you like to encrypt?: ')
        print('Your encrypted text is:')
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input('What text would you like to decrypt?: ')
        print('Your decrypted text is:')
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 84 | 0 |
import argparse

import intel_extension_for_pytorch as ipex
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline

# Variable names restored: every assignment target had been replaced by `_a`,
# leaving all later references (parser, args, pipe, ...) undefined.
parser = argparse.ArgumentParser('Stable Diffusion script with intel optimization', add_help=False)
parser.add_argument('--dpm', action='store_true', help='Enable DPMSolver or not')
parser.add_argument('--steps', default=None, type=int, help='Num inference steps')
args = parser.parse_args()

device = 'cpu'
prompt = 'a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'

model_id = 'path-to-your-trained-model'
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex (torch.bfloataa was not a real dtype; bfloat16 intended)
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {'generator': generator}
if args.steps is not None:
    generate_kwargs['num_inference_steps'] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save('generated.png')
| 704 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class a_ (PipelineTool):
    """Tool that produces an English caption for an image via BLIP."""

    default_checkpoint = 'Salesforce/blip-image-captioning-base'
    description = (
        'This is a tool that generates a description of an image. It takes an input named `image` which should be the '
        'image to caption, and returns a text that contains the description in English.'
    )
    name = 'image_captioner'
    model_class = AutoModelForVisionaSeq

    inputs = ['image']
    outputs = ['text']

    def __init__(self, *args, **kwargs):
        # Pre/post-processing needs PIL, which lives behind the "vision" extra.
        requires_backends(self, ['vision'])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        """Preprocess the input image into model tensors."""
        return self.pre_processor(images=image, return_tensors='pt')

    def forward(self, inputs):
        """Run generation to obtain caption token ids."""
        return self.model.generate(**inputs)

    def decode(self, outputs):
        """Decode generated ids to a clean caption string.

        skip_special_tokens must be a bool; the corrupted version passed the
        outputs tensor itself.
        """
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
| 84 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    """Builds small DistilBERT configs/inputs and checks each TF head's output shapes."""

    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = 'gelu'
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
            vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertModel(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs)
        # The model must also accept a positional [ids, mask] list.
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        # The corrupted version bound the whole tuple to one name; unpack it.
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model / pipeline test-suite entry points for TF DistilBERT."""

    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': TFDistilBertModel,
            'fill-mask': TFDistilBertForMaskedLM,
            'question-answering': TFDistilBertForQuestionAnswering,
            'text-classification': TFDistilBertForSequenceClassification,
            'token-classification': TFDistilBertForTokenClassification,
            'zero-shot': TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        """Regression-check hidden states of the pretrained base model on a fixed input."""
        model = TFDistilBertModel.from_pretrained('distilbert-base-uncased')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.1926_1885, -0.1373_2955, 0.411_9799],
                    [0.2215_0156, -0.0742_2661, 0.3903_7204],
                    [0.2275_6018, -0.089_6414, 0.370_1467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 705 |
def lucas_lehmer_test(p: int) -> bool:
    """Lucas-Lehmer primality test: return True iff the Mersenne number 2**p - 1 is prime.

    Raises ValueError for p < 2. Name restored to match the __main__ calls below.

    >>> lucas_lehmer_test(7)
    True
    >>> lucas_lehmer_test(11)
    False
    """
    if p < 2:
        raise ValueError('p should not be less than 2!')
    elif p == 2:
        return True  # 2**2 - 1 == 3 is prime

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
| 84 | 0 |
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
# The conversion function below calls logger.info(...), so this must be named `logger`.
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    """Copy weights from an old-structure (X)ProphetNet checkpoint into the new model classes.

    prophetnet_checkpoint_path: path to the old checkpoint; "xprophetnet" in the
    path selects the XLM variant. pytorch_dump_folder_path: output folder.
    Raises ValueError if any missing key cannot be initialized.
    """
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ['key_proj', 'value_proj', 'query_proj']

    # new attribute name -> old attribute name ("" means the level collapses away)
    mapping = {
        'self_attn': 'ngram_self_attn',
        'cross_attn': 'encoder_attn',
        'cross_attn_layer_norm': 'encoder_attn_layer_norm',
        'feed_forward_layer_norm': 'final_layer_norm',
        'feed_forward': '',
        'intermediate': 'fc1',
        'output': 'fc2',
        'key_proj': 'k_proj',
        'query_proj': 'q_proj',
        'value_proj': 'v_proj',
        'word_embeddings': 'embed_tokens',
        'embeddings_layer_norm': 'emb_layer_norm',
        'relative_pos_embeddings': 'relative_linear',
        'ngram_embeddings': 'ngram_input_embed',
        'position_embeddings': 'embed_positions',
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split('.')

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            # Resolve the corresponding attribute name on the old model.
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, 'in_proj_weight'):
                # Old attention stored q/k/v fused in in_proj_*; split it three ways.
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                # These were bare comparisons (no-ops) before; assert them for real.
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            # Not a leaf: descend one level on both models.
            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(old_attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 706 |
from sklearn.metrics import fa_score
import datasets
_a : List[str] = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'
_a : Dict = '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. 
This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, 
references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n'
_a : List[Any] = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
    """Hugging Face `datasets` metric wrapping scikit-learn's F1 score."""

    def lowerCAmelCase( self : Any ):
        """Declare the metric's input features and reference URLs.

        The ``multilabel`` config takes sequences of ints per example; every
        other config takes a single int prediction/reference per example.
        """
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
                    '''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
                }
                if self.config_name == '''multilabel'''
                else {
                    '''predictions''': datasets.Value('''int32''' ),
                    '''references''': datasets.Value('''int32''' ),
                } ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )

    def lowerCAmelCase( self , predictions , references , labels=None , pos_label=1 , average="binary" , sample_weight=None ):
        """Compute the F1 score.

        Fix: the original signature declared every argument with the same
        name (`UpperCAmelCase__`), which is a SyntaxError (duplicate argument
        names) and made the keyword mapping onto sklearn ambiguous. Parameter
        order and defaults follow the documented metric interface:
        predictions, references, labels=None, pos_label=1, average="binary",
        sample_weight=None.

        Returns:
            dict with key "f1": a float for single-valued averages, or an
            array of per-label scores when average=None.
        """
        # NOTE(review): `fa_score` is presumably sklearn.metrics.f1_score
        # imported (under this obfuscated alias) above this chunk -- confirm.
        # sklearn's signature is f1_score(y_true, y_pred, ...), so references
        # come first.
        snake_case : List[Any] = fa_score(
            references , predictions , labels=labels , pos_label=pos_label , average=average , sample_weight=sample_weight )
        return {"f1": float(snake_case ) if snake_case.size == 1 else snake_case}
| 84 | 0 |
def a_ ( __magic_name__ ) -> int:
    """Return the (n - 1)-th Catalan number for a positive integer n.

    Uses the exact integer recurrence C(i) = C(i-1) * (4i - 2) // (i + 1),
    so a_(1) == 1, a_(5) == 14, a_(10) == 4862.

    Fixes vs. the original: the type check called isinstance(x, x) (always a
    TypeError), the error messages interpolated an undefined name `number`,
    and the exceptions were raised with the raw input instead of the message.

    Raises:
        TypeError: if the input is not an int.
        ValueError: if the input is < 1.
    """
    if not isinstance(__magic_name__ , int ):
        snake_case : str = F"Input value of [number={__magic_name__}] must be an integer"
        raise TypeError(snake_case )
    if __magic_name__ < 1:
        snake_case : str = F"Input value of [number={__magic_name__}] must be > 0"
        raise ValueError(snake_case )
    snake_case : int = 1
    # Catalan recurrence; the division is always exact, so // loses nothing.
    for i in range(1 , __magic_name__ ):
        snake_case *= 4 * i - 2
        snake_case //= i + 1
    return snake_case
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 707 |
def a_ ( __magic_name__ ) -> int:
    """Return the largest number obtainable by deleting exactly one digit.

    Operates on the absolute value of the input, e.g. a_(2736) == 736 and
    a_(-152) == 52.

    Fixes vs. the original: the type check called isinstance(x, x) (a
    TypeError for every input), and the candidate lists were built from the
    raw int instead of its digit string (list()/len()/pop() on an int).

    Raises:
        TypeError: if the input is not an int.

    NOTE: a single-digit input leaves an empty candidate, so int("") raises
    ValueError -- same as the algorithm this was derived from.
    """
    if not isinstance(__magic_name__ , int ):
        raise TypeError('''only integers accepted as input''' )
    snake_case : str = str(abs(__magic_name__ ) )
    # One candidate per position, each a copy of the digit list.
    snake_case : list = [list(snake_case ) for _ in range(len(snake_case ) )]
    # Drop a different digit from each candidate.
    for index in range(len(snake_case ) ):
        snake_case[index].pop(index )
    return max(
        int("".join(transposition ) ) for transposition in snake_case )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    __import__('doctest').testmod()
| 84 | 0 |
def a_ ( n , allow_probable = False ) -> bool:
    """Deterministic Miller-Rabin primality test for n < 3.32e24.

    Fixes vs. the original: both parameters shared one (duplicate) name,
    which is a SyntaxError; the bounds list and the witness-prime list were
    assigned to the same variable (clobbering the bounds); `enumerate` then
    iterated the integer input; `primes[:idx]` referenced an unbound name;
    and `d, s = n - 1, 0` had lost its tuple unpacking, leaving `d` and `s`
    undefined.

    Args:
        n: integer to test for primality.
        allow_probable: if True, values above the deterministic bound get a
            probabilistic answer instead of raising.

    Returns:
        True if n is prime (or a probable prime above the bound),
        False otherwise.

    Raises:
        ValueError: if n exceeds the deterministic bound and allow_probable
            is False.
    """
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
        raise ValueError(
            '''Warning: upper bound of deterministic test is exceeded. '''
            '''Pass allow_probable=True to allow probabilistic test. '''
            '''A return value of True indicates a probable prime.''' )
    # Thresholds below which each prefix of `primes` is a sufficient witness
    # set (1 entries are placeholders where no new witness is required).
    bounds = [
        2_047,
        1_373_653,
        25_326_001,
        3_215_031_751,
        2_152_302_898_747,
        3_474_749_660_383,
        341_550_071_728_321,
        1,
        3_825_123_056_546_413_051,
        1,
        1,
        318_665_857_834_031_151_167_461,
        3_317_044_064_679_887_385_961_981,
    ]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    # Fall back to the full witness list when n exceeds every bound
    # (only reachable with allow_probable=True).
    plist = primes
    for idx, _p in enumerate(bounds , 1 ):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    # Break n - 1 into d * 2**s with d odd.
    d, s = n - 1, 0
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s ):
            m = pow(prime , d * 2**r , n )
            # witness passes if m == 1 on the first round or m == n - 1 later
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, the witness never hit 1 / n - 1, so n MUST be composite
        return False
    return True
def a_ ( ) -> None:
    """Spot-check the primality test on composites/primes straddling each deterministic bound.

    NOTE(review): `miller_rabin` is not defined in this module view -- the
    primality test above is named `a_`, and this function itself rebinds that
    name, so these asserts would raise NameError as written. Confirm where
    `miller_rabin` is meant to come from before running.
    """
    assert not miller_rabin(561 )
    assert miller_rabin(563 )
    # 2047
    assert not miller_rabin(838_201 )
    assert miller_rabin(838_207 )
    # 1_373_653
    assert not miller_rabin(17_316_001 )
    assert miller_rabin(17_316_017 )
    # 25_326_001
    assert not miller_rabin(3_078_386_641 )
    assert miller_rabin(3_078_386_653 )
    # 3_215_031_751
    assert not miller_rabin(1_713_045_574_801 )
    assert miller_rabin(1_713_045_574_819 )
    # 2_152_302_898_747
    assert not miller_rabin(2_779_799_728_307 )
    assert miller_rabin(2_779_799_728_327 )
    # 3_474_749_660_383
    assert not miller_rabin(113_850_023_909_441 )
    assert miller_rabin(113_850_023_909_527 )
    # 341_550_071_728_321
    assert not miller_rabin(1_275_041_018_848_804_351 )
    assert miller_rabin(1_275_041_018_848_804_391 )
    # 3_825_123_056_546_413_051
    assert not miller_rabin(79_666_464_458_507_787_791_867 )
    assert miller_rabin(79_666_464_458_507_787_791_951 )
    # 318_665_857_834_031_151_167_461
    assert not miller_rabin(552_840_677_446_647_897_660_333 )
    assert miller_rabin(552_840_677_446_647_897_660_359 )
    # 3_317_044_064_679_887_385_961_981
    # upper limit for probabilistic test
if __name__ == "__main__":
    # NOTE(review): `test_miller_rabin` is not defined in this module view --
    # the check function above is named `a_`. Confirm the intended entry point.
    test_miller_rabin()
| 708 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class a_ :
    """Builds tiny UMT5 configs and random inputs for the model tests below.

    NOTE(review): every method below declares all of its non-self parameters
    with the single name `UpperCAmelCase__`, which is a SyntaxError
    (duplicate argument names). Several bodies also reuse one local name
    (`snake_case`) for distinct values and reference names that are never
    bound (e.g. `parent`, `input_ids`, `config`, `inputs_dict`). This block
    is annotated as-is; it cannot run until those names are restored from the
    original tester it was derived from.
    """

    # Stores the test-harness hyperparameters; the attribute reads on the
    # right-hand side (parent, batch_size, ...) would be the original
    # parameter names.
    def __init__( self : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any]=99 , UpperCAmelCase__ : Dict=13 , UpperCAmelCase__ : int=7 , UpperCAmelCase__ : Any=9 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Tuple=False , UpperCAmelCase__ : Tuple=32 , UpperCAmelCase__ : Dict=5 , UpperCAmelCase__ : Optional[int]=4 , UpperCAmelCase__ : List[Any]=37 , UpperCAmelCase__ : Union[str, Any]=8 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : str=0.002 , UpperCAmelCase__ : str=1 , UpperCAmelCase__ : Any=0 , UpperCAmelCase__ : Union[str, Any]=0 , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Optional[Any]=None , ):
        """Store model/test hyperparameters on the tester instance."""
        snake_case : Union[str, Any] = parent
        snake_case : Union[str, Any] = batch_size
        snake_case : Any = encoder_seq_length
        snake_case : str = decoder_seq_length
        # For common tests
        snake_case : Optional[int] = self.decoder_seq_length
        snake_case : Optional[Any] = is_training
        snake_case : List[Any] = use_attention_mask
        snake_case : Union[str, Any] = use_labels
        snake_case : Any = vocab_size
        snake_case : Optional[int] = hidden_size
        snake_case : List[str] = num_hidden_layers
        snake_case : Union[str, Any] = num_attention_heads
        snake_case : Any = d_ff
        snake_case : Any = relative_attention_num_buckets
        snake_case : Optional[Any] = dropout_rate
        snake_case : int = initializer_factor
        snake_case : Optional[Any] = eos_token_id
        snake_case : Dict = pad_token_id
        snake_case : Optional[Any] = decoder_start_token_id
        snake_case : Union[str, Any] = None
        snake_case : List[str] = decoder_layers

    # Returns the full pretrained config (used by pipeline-style tests).
    def lowerCAmelCase( self : Union[str, Any] ):
        """Fetch the reference google/umt5-base configuration."""
        return TaConfig.from_pretrained('''google/umt5-base''' )

    # Fills in default attention / head masks for any that were not provided.
    def lowerCAmelCase( self : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : List[Any]=None , ):
        """Assemble the model's keyword-argument dict, defaulting missing masks."""
        if attention_mask is None:
            snake_case : Union[str, Any] = input_ids.ne(config.pad_token_id )
        if decoder_attention_mask is None:
            snake_case : Any = decoder_input_ids.ne(config.pad_token_id )
        if head_mask is None:
            snake_case : List[Any] = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=UpperCAmelCase__ )
        if decoder_head_mask is None:
            snake_case : Tuple = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=UpperCAmelCase__ )
        if cross_attn_head_mask is None:
            snake_case : Union[str, Any] = torch.ones(
                config.num_decoder_layers , config.num_attention_heads , device=UpperCAmelCase__ )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }

    # Builds random encoder/decoder ids (pad ids clamped away) plus a config.
    def lowerCAmelCase( self : int ):
        """Create a small config and random, pad-free input tensors."""
        snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
        snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        # NOTE(review): `input_ids` / `decoder_input_ids` / `UpperCAmelCase__`
        # are unbound here -- restore from the original tester.
        snake_case : List[str] = input_ids.clamp(self.pad_token_id + 1 )
        snake_case : List[str] = decoder_input_ids.clamp(self.pad_token_id + 1 )
        snake_case : str = self.get_config()
        snake_case : Tuple = config.num_attention_heads
        snake_case : List[Any] = self.prepare_inputs_dict(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
        return config, input_dict

    # Thin wrapper used by the common test mixin.
    def lowerCAmelCase( self : Dict ):
        """Return (config, inputs_dict) for the common tests."""
        # NOTE(review): both unpack targets share one name and the returned
        # names are unbound -- restore from the original tester.
        snake_case , snake_case : List[str] = self.prepare_config_and_inputs()
        return config, inputs_dict

    # Small config with a fixed 166-token vocabulary.
    def lowerCAmelCase( self : Dict ):
        """Build a tiny TaConfig with vocab_size=166."""
        return TaConfig(
            vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )

    # Same config but with the tester's own vocab size.
    def lowerCAmelCase( self : Tuple ):
        """Build a tiny TaConfig using self.vocab_size."""
        return TaConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )

    # Forward pass shape checks on the full encoder-decoder model.
    def lowerCAmelCase( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Tuple , ):
        """Run UMTaModel forward and check output/cache shapes."""
        snake_case : str = UMTaModel(config=UpperCAmelCase__ )
        model.to(UpperCAmelCase__ )
        model.eval()
        snake_case : str = model(
            input_ids=UpperCAmelCase__ , decoder_input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , )
        snake_case : int = model(input_ids=UpperCAmelCase__ , decoder_input_ids=UpperCAmelCase__ )
        snake_case : int = result.last_hidden_state
        snake_case : Dict = result.past_key_values
        snake_case : Dict = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
        self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(UpperCAmelCase__ ) , config.num_layers )
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0] ) , 4 )

    # Checks that cached (past_key_values) decoding matches full decoding.
    def lowerCAmelCase( self : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str] , ):
        """Compare decoder outputs with and without the KV cache."""
        snake_case : int = UMTaModel(config=UpperCAmelCase__ ).get_decoder().to(UpperCAmelCase__ ).eval()
        # first forward pass
        # NOTE(review): the three forward passes below all rebind the same
        # local, so the subsequent length comparisons are vacuous -- the
        # original bound them to distinct names.
        snake_case : List[Any] = model(UpperCAmelCase__ , use_cache=UpperCAmelCase__ )
        snake_case : List[Any] = model(UpperCAmelCase__ )
        snake_case : Any = model(UpperCAmelCase__ , use_cache=UpperCAmelCase__ )
        self.parent.assertTrue(len(UpperCAmelCase__ ) == len(UpperCAmelCase__ ) )
        self.parent.assertTrue(len(UpperCAmelCase__ ) == len(UpperCAmelCase__ ) + 1 )
        snake_case , snake_case : List[str] = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        snake_case : Any = ids_tensor((self.batch_size, 1) , config.vocab_size )
        # append to next input_ids and
        snake_case : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
        snake_case : Any = model(UpperCAmelCase__ )['''last_hidden_state''']
        snake_case : Optional[Any] = model(UpperCAmelCase__ , past_key_values=UpperCAmelCase__ )['''last_hidden_state''']
        # select random slice
        snake_case : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        snake_case : Union[str, Any] = output_from_no_past[:, -1, random_slice_idx].detach()
        snake_case : Tuple = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-3 ) )

    # Sanity-checks a half-precision forward pass for NaNs.
    def lowerCAmelCase( self : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , ):
        """Run an fp16 forward pass and assert the output has no NaNs."""
        snake_case : int = UMTaModel(config=UpperCAmelCase__ ).to(UpperCAmelCase__ ).half().eval()
        snake_case : str = model(**UpperCAmelCase__ )['''last_hidden_state''']
        self.parent.assertFalse(torch.isnan(UpperCAmelCase__ ).any().item() )
@require_torch
class a_ ( a , a , a , unittest.TestCase ):
    """Common-mixin test suite for the UMT5 models.

    NOTE(review): the base list `(a, a, a, unittest.TestCase)` repeats the
    (undefined) name `a` -- the original bases were the ModelTester /
    Generation / Pipeline mixins, and duplicate bases raise TypeError.
    Every `A__` class attribute below rebinds the same name, so only the last
    assignment would survive; the originals had distinct attribute names
    (all_model_classes, pipeline_model_mapping, flags, ...). Methods also
    declare duplicate `UpperCAmelCase__` parameters (SyntaxError).
    """

    # Originally: all_model_classes
    A__ : str = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    # Originally: all_generative_model_classes
    A__ : str = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    # Originally: pipeline_model_mapping
    A__ : Any = (
        {
            'conversational': UMTaForConditionalGeneration,
            'feature-extraction': UMTaModel,
            'summarization': UMTaForConditionalGeneration,
            'text2text-generation': UMTaForConditionalGeneration,
            'translation': UMTaForConditionalGeneration,
            'question-answering': UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    # Originally distinct boolean test flags (fx compatible, head masking, ...).
    A__ : Dict = True
    A__ : List[str] = False
    A__ : Optional[int] = False
    A__ : Optional[int] = True
    A__ : List[str] = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    A__ : int = [0.8, 0.9]

    # setUp: build the shared model tester.
    def lowerCAmelCase( self : Optional[Any] ):
        """Instantiate the UMTaModelTester helper."""
        snake_case : Union[str, Any] = UMTaModelTester(self )

    # Exports the model to ONNX inside a temp dir (skipped on torch 1.8.0).
    @unittest.skip('''Test has a segmentation fault on torch 1.8.0''' )
    def lowerCAmelCase( self : Optional[int] ):
        """Smoke-test torch.onnx.export on the tiny model."""
        snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
        snake_case : Optional[Any] = UMTaModel(config_and_inputs[0] ).to(UpperCAmelCase__ )
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                UpperCAmelCase__ , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"{tmpdirname}/t5_test.onnx" , export_params=UpperCAmelCase__ , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , )

    # fp16 forward-pass NaN check (needs an accelerator).
    @unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
    def lowerCAmelCase( self : List[Any] ):
        """Delegate to the tester's fp16 forward check."""
        snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fpaa_forward(*UpperCAmelCase__ )

    # Verifies that zeroed head masks zero the corresponding attentions
    # during generate().
    def lowerCAmelCase( self : Tuple ):
        """Check head_mask / decoder_head_mask / cross_attn_head_mask wiring."""
        snake_case : Optional[int] = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions''']
        snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
        snake_case : int = config_and_inputs[0]
        snake_case : Union[str, Any] = UMTaForConditionalGeneration(UpperCAmelCase__ ).eval()
        model.to(UpperCAmelCase__ )
        snake_case : str = {
            '''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=UpperCAmelCase__ ),
            '''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCAmelCase__ ),
            '''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCAmelCase__ ),
        }
        # NOTE(review): `head_masking` / `attention_names` below would be the
        # locals bound above in the original (here everything is `snake_case`).
        for attn_name, (name, mask) in zip(UpperCAmelCase__ , head_masking.items() ):
            snake_case : int = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                snake_case : List[str] = torch.ones(
                    config.num_decoder_layers , config.num_heads , device=UpperCAmelCase__ )
            snake_case : Union[str, Any] = model.generate(
                config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=UpperCAmelCase__ , return_dict_in_generate=UpperCAmelCase__ , **UpperCAmelCase__ , )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            snake_case : List[str] = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )

    @unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' )
    def lowerCAmelCase( self : Any ):
        """Intentionally skipped on the tiny model."""
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class a_ ( unittest.TestCase ):
    """Slow integration test for google/umt5-small generation.

    NOTE(review): the method body passes `UpperCAmelCase__` as several keyword
    values (return_dict, device, use_fast, legacy, padding, ...) but no such
    name is bound in this scope -- the original used literal True/False and
    `torch_device`. Annotated as-is.
    """

    @slow
    @unittest.skip(
        '''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' )
    def lowerCAmelCase( self : int ):
        """Tokenize sentinel-token prompts, pin the expected ids, and compare generate() output."""
        snake_case : Optional[Any] = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=UpperCAmelCase__ ).to(UpperCAmelCase__ )
        snake_case : int = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=UpperCAmelCase__ , legacy=UpperCAmelCase__ )
        snake_case : List[str] = [
            '''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
            '''No se como puedo <extra_id_0>.''',
            '''This is the reason why we <extra_id_0> them.''',
            '''The <extra_id_0> walks in <extra_id_1>, seats''',
            '''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
        ]
        snake_case : Dict = tokenizer(UpperCAmelCase__ , return_tensors='''pt''' , padding=UpperCAmelCase__ ).input_ids
        # fmt: off
        # Expected token ids for the five prompts above (right-padded).
        snake_case : Optional[Any] = torch.tensor(
            [
                [ 38_530, 210_703, 256_299, 1_410, 256_298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 826, 321, 671, 25_922, 256_299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 1_460, 339, 312, 19_014, 10_620, 758, 256_299, 2_355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
                [ 517, 256_299, 14_869, 281, 301, 256_298, 275, 119_983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 320, 256_299, 14_869, 281, 2_234, 289, 2_275, 333,61_391, 289, 256_298, 543, 256_297, 168_714, 329, 256_296,274, 1],
            ] )
        # fmt: on
        torch.testing.assert_allclose(UpperCAmelCase__ , UpperCAmelCase__ )
        snake_case : List[Any] = model.generate(input_ids.to(UpperCAmelCase__ ) )
        # Expected decoded generations for the five prompts.
        snake_case : int = [
            '''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
            '''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
            '''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
            '''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
            '''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
        ]
        snake_case : Tuple = tokenizer.batch_decode(UpperCAmelCase__ )
        self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
| 84 | 0 |
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class a_ :
def __init__( self : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int]=13 , UpperCAmelCase__ : str=7 , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : List[Any]=99 , UpperCAmelCase__ : str=[1, 1, 2] , UpperCAmelCase__ : Tuple=1 , UpperCAmelCase__ : Tuple=32 , UpperCAmelCase__ : str=4 , UpperCAmelCase__ : Dict=8 , UpperCAmelCase__ : Any=37 , UpperCAmelCase__ : Dict="gelu_new" , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : str=0.0 , UpperCAmelCase__ : Tuple=512 , UpperCAmelCase__ : List[Any]=3 , UpperCAmelCase__ : int=0.02 , UpperCAmelCase__ : List[str]=3 , UpperCAmelCase__ : Union[str, Any]=4 , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : Optional[Any]=False , ):
"""simple docstring"""
snake_case : List[str] = parent
snake_case : Union[str, Any] = batch_size
snake_case : Tuple = seq_length
snake_case : int = is_training
snake_case : int = use_input_mask
snake_case : Optional[Any] = use_token_type_ids
snake_case : Dict = use_labels
snake_case : Tuple = vocab_size
snake_case : Optional[int] = block_sizes
snake_case : Optional[Any] = num_decoder_layers
snake_case : Union[str, Any] = d_model
snake_case : Tuple = n_head
snake_case : Optional[Any] = d_head
snake_case : Optional[int] = d_inner
snake_case : Optional[int] = hidden_act
snake_case : Dict = hidden_dropout
snake_case : List[str] = attention_dropout
snake_case : Dict = activation_dropout
snake_case : List[str] = max_position_embeddings
snake_case : List[str] = type_vocab_size
snake_case : List[str] = 2
snake_case : Optional[Any] = num_labels
snake_case : Tuple = num_choices
snake_case : Dict = scope
snake_case : int = initializer_std
# Used in the tests to check the size of the first attention layer
snake_case : Any = n_head
# Used in the tests to check the size of the first hidden state
snake_case : List[str] = self.d_model
# Used in the tests to check the number of output hidden states/attentions
snake_case : Tuple = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
snake_case : Any = self.num_hidden_layers + 2
def lowerCAmelCase( self : Union[str, Any] ):
"""simple docstring"""
snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case : Dict = None
if self.use_input_mask:
snake_case : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
snake_case : int = None
if self.use_token_type_ids:
snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case : int = None
snake_case : Optional[int] = None
snake_case : int = None
if self.use_labels:
snake_case : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case : Tuple = ids_tensor([self.batch_size] , self.num_choices )
snake_case : str = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCAmelCase( self : List[str] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[Any] , ):
"""simple docstring"""
snake_case : Optional[int] = TFFunnelModel(config=UpperCAmelCase__ )
snake_case : List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
snake_case : Union[str, Any] = model(UpperCAmelCase__ )
snake_case : Tuple = [input_ids, input_mask]
snake_case : Dict = model(UpperCAmelCase__ )
snake_case : List[Any] = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
snake_case : str = False
snake_case : Union[str, Any] = TFFunnelModel(config=UpperCAmelCase__ )
snake_case : Optional[Any] = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
snake_case : List[Any] = False
snake_case : Dict = TFFunnelModel(config=UpperCAmelCase__ )
snake_case : Tuple = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def lowerCAmelCase( self : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str , ):
"""simple docstring"""
snake_case : int = TFFunnelBaseModel(config=UpperCAmelCase__ )
snake_case : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
snake_case : Optional[int] = model(UpperCAmelCase__ )
snake_case : Optional[int] = [input_ids, input_mask]
snake_case : List[str] = model(UpperCAmelCase__ )
snake_case : Tuple = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
snake_case : List[str] = False
snake_case : Union[str, Any] = TFFunnelBaseModel(config=UpperCAmelCase__ )
snake_case : Tuple = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
snake_case : Any = False
snake_case : int = TFFunnelBaseModel(config=UpperCAmelCase__ )
snake_case : int = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def lowerCAmelCase( self : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , ):
"""simple docstring"""
snake_case : Dict = TFFunnelForPreTraining(config=UpperCAmelCase__ )
snake_case : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
snake_case : List[Any] = model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase( self : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , ):
"""simple docstring"""
snake_case : List[str] = TFFunnelForMaskedLM(config=UpperCAmelCase__ )
snake_case : Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
snake_case : Tuple = model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase( self : Tuple , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , ):
"""simple docstring"""
snake_case : Any = self.num_labels
snake_case : Dict = TFFunnelForSequenceClassification(config=UpperCAmelCase__ )
snake_case : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
snake_case : List[str] = model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase( self : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , ):
"""simple docstring"""
snake_case : Dict = self.num_choices
snake_case : Tuple = TFFunnelForMultipleChoice(config=UpperCAmelCase__ )
snake_case : str = tf.tile(tf.expand_dims(UpperCAmelCase__ , 1 ) , (1, self.num_choices, 1) )
snake_case : List[Any] = tf.tile(tf.expand_dims(UpperCAmelCase__ , 1 ) , (1, self.num_choices, 1) )
snake_case : Dict = tf.tile(tf.expand_dims(UpperCAmelCase__ , 1 ) , (1, self.num_choices, 1) )
snake_case : Union[str, Any] = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
snake_case : Dict = model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase( self : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] , ):
"""simple docstring"""
snake_case : str = self.num_labels
snake_case : str = TFFunnelForTokenClassification(config=UpperCAmelCase__ )
snake_case : List[str] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
snake_case : Dict = model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] , ):
"""simple docstring"""
snake_case : Optional[int] = TFFunnelForQuestionAnswering(config=UpperCAmelCase__ )
snake_case : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
snake_case : List[Any] = model(UpperCAmelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def prepare_config_and_inputs_for_common(self):
    """Return ``(config, inputs_dict)`` for the shared model tests.

    Fixes the mangled original, which assigned the whole tuple returned by
    ``prepare_config_and_inputs`` to a single name and then referenced the
    (undefined) individual components.
    """
    config_and_inputs = self.prepare_config_and_inputs()
    (
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ) = config_and_inputs
    inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
    return config, inputs_dict
@require_tf
class a_(a, a, unittest.TestCase):
    # NOTE(review): identifiers in this class look machine-mangled — both mixin
    # bases are spelled `a` (Python raises TypeError on duplicate base classes),
    # every test method shares the name `lowerCAmelCase` (each later def shadows
    # the previous one), and `UpperCAmelCase__` is referenced but never defined.
    # Confirm the original names upstream before relying on this suite.

    # All TF Funnel model classes exercised by the common model tests
    # (empty tuple when TensorFlow is not installed).
    A__ : str = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    # Pipeline-task name -> model class(es) used by the pipeline tests.
    A__ : List[Any] = (
        {
            'feature-extraction': (TFFunnelBaseModel, TFFunnelModel),
            'fill-mask': TFFunnelForMaskedLM,
            'question-answering': TFFunnelForQuestionAnswering,
            'text-classification': TFFunnelForSequenceClassification,
            'token-classification': TFFunnelForTokenClassification,
            'zero-shot': TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # Two boolean feature flags consumed by the shared test mixin
    # (both disabled; second assignment shadows the first — see NOTE above).
    A__ : List[Any] = False
    A__ : List[str] = False

    def lowerCAmelCase(self: List[Any]):
        """Create the model tester and the config tester used by the tests below."""
        snake_case : Optional[int] = TFFunnelModelTester(self)
        snake_case : Union[str, Any] = ConfigTester(self, config_class=UpperCAmelCase__)

    def lowerCAmelCase(self: List[Any]):
        """Run the generic configuration sanity checks."""
        self.config_tester.run_common_tests()

    def lowerCAmelCase(self: List[Any]):
        """Exercise the bare TFFunnelModel forward pass."""
        snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCAmelCase__)

    def lowerCAmelCase(self: Any):
        """Exercise the pretraining head."""
        snake_case : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase__)

    def lowerCAmelCase(self: str):
        """Exercise the masked-LM head."""
        snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__)

    def lowerCAmelCase(self: Any):
        """Exercise the token-classification head."""
        snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase__)

    def lowerCAmelCase(self: int):
        """Exercise the question-answering head."""
        snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__)
@require_tf
class a_(a, unittest.TestCase):
    # NOTE(review): identifiers look machine-mangled — all test methods share
    # the name `lowerCAmelCase` (later defs shadow earlier ones) and
    # `UpperCAmelCase__` is referenced but never defined. Confirm upstream.

    # "Base" TF Funnel variants (no decoder on top) exercised by the tests.
    A__ : str = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    # Two boolean feature flags for the shared mixin (second shadows the first).
    A__ : Optional[int] = False
    A__ : Dict = False

    def lowerCAmelCase(self: List[Any]):
        """Create the model tester (base variant) and the config tester."""
        snake_case : Tuple = TFFunnelModelTester(self, base=UpperCAmelCase__)
        snake_case : Any = ConfigTester(self, config_class=UpperCAmelCase__)

    def lowerCAmelCase(self: List[Any]):
        """Run the generic configuration sanity checks."""
        self.config_tester.run_common_tests()

    def lowerCAmelCase(self: Tuple):
        """Exercise the base (encoder-only) model."""
        snake_case : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*UpperCAmelCase__)

    def lowerCAmelCase(self: Any):
        """Exercise the sequence-classification head."""
        snake_case : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__)

    def lowerCAmelCase(self: str):
        """Exercise the multiple-choice head."""
        snake_case : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase__)
| 709 |
import torch
from diffusers import DiffusionPipeline
class a_(DiffusionPipeline):
    """Minimal custom pipeline used to exercise pipeline registration/loading.

    Fixes the mangled original: the base class was the undefined name ``a``
    (the file imports ``DiffusionPipeline``), ``__init__`` declared two
    parameters with the same name (a SyntaxError), and ``__call__`` referenced
    undefined ``UpperCAmelCase__`` names.
    """

    def __init__(self, unet, scheduler):
        super().__init__()
        # Register components so save/load and device placement work.
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        """Run one denoising step and return an all-ones tensor of sample shape."""
        sample = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1
        model_output = self.unet(sample, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, sample).prev_sample
        # scheduler_output - scheduler_output == 0: keeps the modules exercised
        # while deterministically returning ones (intentional test behavior).
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)
        return result
| 84 | 0 |
from __future__ import annotations
def a_(graph: dict, start: str) -> set[str]:
    """Iterative depth-first search.

    Returns the set of all vertices reachable from ``start`` in ``graph``
    (an adjacency-list mapping: vertex -> list of neighbours).

    Fixes the mangled original, which declared two parameters with the same
    name (a SyntaxError) and collapsed the ``explored, stack`` unpacking into
    a single assignment, leaving both names undefined.
    """
    explored, stack = set(), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
# Demo graph: adjacency list, vertex -> neighbours.
G = {
    'A': ['B', 'C', 'D'],
    'B': ['A', 'D', 'E'],
    'C': ['A', 'F'],
    'D': ['B', 'D'],
    'E': ['B', 'F'],
    'F': ['C', 'E', 'G'],
    'G': ['F'],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # The mangled original bound the graph to `_a` and called the undefined
    # name `depth_first_search`; the DFS function in this file is `a_`.
    print(a_(G, 'A'))
| 710 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class a_(a):
    """Processor wrapping a CLIP image processor and a CLIP tokenizer.

    Fixes the mangled original: ``__init__`` and ``__call__`` declared several
    parameters with the same name (a SyntaxError), tokenizer/image-processor
    results were discarded into throwaway ``snake_case`` names so ``encoding``
    was never defined, and the three ProcessorMixin class attributes were all
    named ``A__`` (shadowing each other); their conventional names are restored.
    """

    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'CLIPImageProcessor'
    tokenizer_class = ('CLIPTokenizer', 'CLIPTokenizerFast')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')

        # Fall back to the deprecated argument for backward compatibility.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or preprocess `images`; merged into one encoding."""
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of tokenizer and image-processor input names, order-preserving.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.',
            FutureWarning,
        )
        return self.image_processor
| 84 | 0 |
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
# NOTE: restored names — the mangled original bound all four values to the
# same name `_a`, while the classes below reference HEURISTIC/grid/delta.
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]
class Node:
    """Search-tree node for A*: position, goal, path cost and heuristic.

    Restored name — AStar below constructs ``Node(...)``, but the mangled
    original named this class ``a_`` and discarded every attribute assignment
    into throwaway ``snake_case`` names.
    """

    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)  # grid coordinates are (row, column)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        """Manhattan distance when HEURISTIC == 1, Euclidean otherwise."""
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        # Sorting open nodes by total estimated cost picks the best candidate.
        return self.f_cost < other.f_cost
class AStar:
    """A* search over the module-level ``grid`` using ``delta`` moves.

    Restored name — the in-file demo and BidirectionalAStar call ``AStar``;
    the mangled original named the class ``a_`` and discarded bindings.
    """

    def __init__(self, start: TPosition, goal: TPosition):
        # Positions arrive as (y, x); Node takes (x, y, goal_x, goal_y, ...).
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99_999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> list[TPosition]:
        """Run A*; return the path to the goal, or ``[start]`` if unreachable."""
        while self.open_nodes:
            # Open nodes are kept best-first via Node.__lt__.
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # Keep whichever of the two equal-position nodes is cheaper.
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        """In-bounds, obstacle-free neighbours of ``parent`` (cost +1 each)."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        """Walk parent links back to the start and return the (y, x) path."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    """Bidirectional A*: one search from start and one from goal, meeting midway.

    Restored name — the in-file demo constructs ``BidirectionalAStar``; the
    mangled original named the class ``a_`` and discarded bindings.
    """

    def __init__(self, start: TPosition, goal: TPosition):
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        """Advance both searches in lockstep until their frontiers meet."""
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)
            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)
            # Each search aims at the other's current frontier node.
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # Keep the cheaper of the two equal-position nodes.
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        """Join the forward path with the reversed backward path."""
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()  # drop the duplicated meeting point
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    # Restored names: the mangled original bound init/goal/timings to `_a`
    # while referencing them by these names.
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
| 711 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_a : Optional[Any] = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
_a : str = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
_a : List[Any] = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. 
Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class a_(datasets.Metric):
    """`datasets` wrapper around the official mauve-text implementation.

    Fixes the mangled original: ``_compute`` declared dozens of parameters all
    named ``UpperCAmelCase__`` (duplicate parameter names are a SyntaxError);
    the ``datasets.Metric`` hook names ``_info``/``_compute`` are restored.
    """

    def _info(self):
        """Metric metadata: citation, input schema and reference URLs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage='https://github.com/krishnap25/mauve',
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Value('string', id='sequence'),
                }
            ),
            codebase_urls=['https://github.com/krishnap25/mauve'],
            reference_urls=[
                'https://arxiv.org/abs/2102.01454',
                'https://github.com/krishnap25/mauve',
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1_024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        """Forward all arguments to ``compute_mauve`` and return its result object."""
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
| 84 | 0 |
def a_(num: int) -> int:
    """Return the largest number obtainable by deleting exactly one digit of |num|.

    Raises:
        TypeError: if ``num`` is not an integer.

    Fixes the mangled original, which compared ``isinstance(num, num)``,
    popped the wrong value from each candidate and joined the wrong list.
    """
    if not isinstance(num, int):
        raise TypeError('only integers accepted as input')
    num_str = str(abs(num))
    # One copy of the digit list per position; drop a different digit in each.
    candidates = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        candidates[index].pop(index)
    return max(int(''.join(candidate)) for candidate in candidates)


if __name__ == "__main__":
    __import__('doctest').testmod()
| 712 |
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name):
    """Map a timm/MAE checkpoint key to its HF ViTMAE equivalent.

    Fixes the mangled original, which discarded every ``name.replace`` result
    into throwaway ``snake_case`` names and returned the input unchanged; the
    name is restored because ``convert_state_dict`` relies on this helper.
    """
    if "cls_token" in name:
        name = name.replace('cls_token', 'vit.embeddings.cls_token')
    if "mask_token" in name:
        name = name.replace('mask_token', 'decoder.mask_token')
    if "decoder_pos_embed" in name:
        name = name.replace('decoder_pos_embed', 'decoder.decoder_pos_embed')
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace('pos_embed', 'vit.embeddings.position_embeddings')
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'vit.embeddings.patch_embeddings.projection')
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm', 'vit.embeddings.norm')
    if "decoder_blocks" in name:
        name = name.replace('decoder_blocks', 'decoder.decoder_layers')
    if "blocks" in name:
        name = name.replace('blocks', 'vit.encoder.layer')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "attn" in name:
        name = name.replace('attn', 'attention.self')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if "decoder_embed" in name:
        name = name.replace('decoder_embed', 'decoder.decoder_embed')
    if "decoder_norm" in name:
        name = name.replace('decoder_norm', 'decoder.decoder_norm')
    if "decoder_pred" in name:
        name = name.replace('decoder_pred', 'decoder.decoder_pred')
    # Final encoder layernorm (decoder keys keep their own norm names).
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace('norm.weight', 'vit.layernorm.weight')
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace('norm.bias', 'vit.layernorm.bias')
    return name
def convert_state_dict(orig_state_dict, config):
    """Rewrite a timm/MAE state dict in place into the HF ViTMAE layout.

    Fused ``qkv`` weights and biases are split into separate query/key/value
    tensors; every other key is renamed via ``rename_key``. Fixes the mangled
    original, which discarded each split tensor into throwaway names and lost
    the destination keys entirely; the name matches the in-file caller.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split('.')
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = 'decoder.decoder_layers.'
            else:
                dim = config.hidden_size
                prefix = 'vit.encoder.layer.'
            # Fused tensor is stacked [query; key; value] along dim 0.
            if "weight" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Download a ViT-MAE checkpoint, convert it to HF format, verify and save.

    Fixes the mangled original, which discarded every ``config.<attr>`` write
    and local binding into throwaway ``snake_case`` names; the name matches
    the argparse entry point at the bottom of the file.
    """
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1_280
        config.intermediate_size = 5_120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')['model']

    image_processor = ViTMAEImageProcessor(size=config.image_size)

    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    url = 'https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg'
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    inputs = image_processor(images=image, return_tensors='pt')

    # forward pass — seeded because MAE applies random masking
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]]
        )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]]
        )
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]]
        )

    # verify logits against the reference slice before saving
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # Restored names: the mangled original bound the parser and parsed args
    # to `_a` while referencing `parser`/`args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--checkpoint_url',
        default='https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth',
        type=str,
        help='URL of the checkpoint you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    args = parser.parse_args()
    convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 84 | 0 |
import requests
from bsa import BeautifulSoup
def get_citation(base_url: str, params: dict) -> str:
    """Return the 'Cited by N' anchor text from a Google Scholar lookup page.

    Fixes the mangled original, which declared two parameters with the same
    name (a SyntaxError) and never bound ``soup``/``div``/``anchors``; the
    name matches the in-file caller.

    NOTE(review): the module imports BeautifulSoup from ``bsa``; the upstream
    package is ``bs4`` — confirm the import line.
    """
    soup = BeautifulSoup(requests.get(base_url, params=params).content, 'html.parser')
    div = soup.find('div', attrs={'class': 'gs_ri'})
    anchors = div.find('div', attrs={'class': 'gs_fl'}).find_all('a')
    # The third anchor in the result footer is the "Cited by ..." link.
    return anchors[2].get_text()
if __name__ == "__main__":
    # Restored name: the mangled original bound this dict to `_a` while
    # passing the undefined name `params` to get_citation.
    params = {
        'title': (
            'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
            'capacitance, volumetric capacitance, and energy density'
        ),
        'journal': 'Chem. Mater.',
        'volume': 30,
        'pages': '3979-3990',
        'year': 2_018,
        'hl': 'en',
    }
    print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
| 713 |
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# Restored names: the mangled original bound both values to `_a`
# (the second assignment shadowed the first).
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16):
    """Build train/eval DataLoaders for GLUE MRPC tokenized with bert-base-cased.

    Fixes the mangled original, which declared two parameters with the same
    name (a SyntaxError) and discarded the tokenizer/dataset bindings; the
    name matches the in-file caller and the mocked-dataloaders override.
    """
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=['idx', 'sentence1', 'sentence2'],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples,
            padding='longest',
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors='pt',
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=32  # eval batch size
    )
    return train_dataloader, eval_dataloader
# For testing only: swap in mocked dataloaders so CI avoids network/dataset
# downloads. Restored rebinding target — the mangled original bound the mock
# to `_a` instead of overriding `get_dataloaders`.
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def a_ ( __magic_name__ , __magic_name__ ) -> Optional[Any]:
    # NOTE(review): both parameters are named ``__magic_name__`` -- duplicate
    # parameter names are a SyntaxError, so this function cannot compile as
    # written. From the body they were presumably ``(config, args)``. The body
    # also binds results to ``snake_case`` while reading them back under their
    # intended names (``accelerator``, ``model``, ``optimizer``, ...), and the
    # two-target annotated assignments below are themselves SyntaxErrors -- the
    # whole function appears machine-mangled; comments document intent only.
    """Fine-tune BERT on GLUE/MRPC with `accelerate`, auto-searching a batch size that fits in memory."""
    if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __magic_name__ ) == "1":
        snake_case : Optional[int] = 2
    # Initialize accelerator
    snake_case : int = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    snake_case : Dict = config['''lr''']
    snake_case : Any = int(config['''num_epochs'''] )
    snake_case : List[str] = int(config['''seed'''] )
    snake_case : List[Any] = int(config['''batch_size'''] )
    snake_case : Tuple = evaluate.load('''glue''' , '''mrpc''' )
    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=__magic_name__ )
    def inner_training_loop(__magic_name__ ):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()
        # Then we can declare the model, optimizer, and everything else:
        set_seed(__magic_name__ )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        snake_case : str = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__magic_name__ )
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        snake_case : Optional[int] = model.to(accelerator.device )
        # Instantiate optimizer
        snake_case : Optional[int] = AdamW(params=model.parameters() , lr=__magic_name__ )
        snake_case , snake_case : List[Any] = get_dataloaders(__magic_name__ , __magic_name__ )
        # Instantiate scheduler
        snake_case : int = get_linear_schedule_with_warmup(
            optimizer=__magic_name__ , num_warmup_steps=100 , num_training_steps=(len(__magic_name__ ) * num_epochs) , )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        snake_case , snake_case , snake_case , snake_case , snake_case : Tuple = accelerator.prepare(
            __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
        # Now we train the model
        for epoch in range(__magic_name__ ):
            model.train()
            for step, batch in enumerate(__magic_name__ ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                snake_case : int = model(**__magic_name__ )
                snake_case : Optional[int] = outputs.loss
                accelerator.backward(__magic_name__ )
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(__magic_name__ ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                with torch.no_grad():
                    snake_case : List[str] = model(**__magic_name__ )
                snake_case : List[Any] = outputs.logits.argmax(dim=-1 )
                snake_case , snake_case : Dict = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
                metric.add_batch(
                    predictions=__magic_name__ , references=__magic_name__ , )
            snake_case : Tuple = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(F"epoch {epoch}:" , __magic_name__ )
    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def a_ ( ) -> Union[str, Any]:
    """Parse command-line arguments and launch the MRPC training run (script entry point)."""
    # NOTE(review): the parser is bound to ``snake_case`` but then used as
    # ``parser``, and both ``training_function`` and ``main`` below are never
    # defined in this file (the definitions above were renamed to ``a_``) --
    # machine-mangled identifiers; intent documented only.
    snake_case : int = argparse.ArgumentParser(description='''Simple example of training script.''' )
    parser.add_argument(
        '''--mixed_precision''' , type=__magic_name__ , default=__magic_name__ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''' , )
    parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
    snake_case : Optional[Any] = parser.parse_args()
    # Default hyper-parameters for the MRPC fine-tuning run.
    snake_case : Optional[int] = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    training_function(__magic_name__ , __magic_name__ )
if __name__ == "__main__":
    main()
| 84 | 0 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class a_ :
    """Builds tiny ASTConfig objects and random spectrogram inputs for the AST model tests.

    NOTE(review): this class is machine-mangled -- every ``__init__`` parameter
    is named ``UpperCAmelCase__`` (duplicate parameter names are a SyntaxError),
    all methods share the name ``lowerCAmelCase`` (later defs overwrite earlier
    ones), and results are bound to ``snake_case`` while the code reads the
    intended names (``parent``, ``batch_size``, ``config``, ...). The
    docstrings below describe the apparent upstream intent only.
    """

    def __init__( self : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any]=13 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : List[Any]=24 , UpperCAmelCase__ : Union[str, Any]=16 , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : str=True , UpperCAmelCase__ : int=32 , UpperCAmelCase__ : Tuple=5 , UpperCAmelCase__ : Dict=4 , UpperCAmelCase__ : Optional[int]=37 , UpperCAmelCase__ : Optional[int]="gelu" , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Optional[int]=10 , UpperCAmelCase__ : Any=0.02 , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : List[Any]=2 , UpperCAmelCase__ : Optional[Any]=2 , ):
        """Store the test hyper-parameters and derive the expected patch sequence length."""
        snake_case : Tuple = parent
        snake_case : Dict = batch_size
        snake_case : str = patch_size
        snake_case : Union[str, Any] = max_length
        snake_case : str = num_mel_bins
        snake_case : Any = is_training
        snake_case : Union[str, Any] = use_labels
        snake_case : Tuple = hidden_size
        snake_case : Dict = num_hidden_layers
        snake_case : Any = num_attention_heads
        snake_case : Any = intermediate_size
        snake_case : List[Any] = hidden_act
        snake_case : str = hidden_dropout_prob
        snake_case : str = attention_probs_dropout_prob
        snake_case : str = type_sequence_label_size
        snake_case : Optional[int] = initializer_range
        snake_case : str = scope
        snake_case : int = frequency_stride
        snake_case : Union[str, Any] = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        snake_case : Optional[int] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        snake_case : Any = (self.max_length - self.patch_size) // self.time_stride + 1
        snake_case : Union[str, Any] = frequency_out_dimension * time_out_dimension
        snake_case : Union[str, Any] = num_patches + 2

    def lowerCAmelCase( self : Union[str, Any] ):
        """Create random spectrogram inputs (and labels when use_labels) plus a config."""
        snake_case : Optional[int] = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
        snake_case : str = None
        if self.use_labels:
            snake_case : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        snake_case : List[str] = self.get_config()
        return config, input_values, labels

    def lowerCAmelCase( self : Any ):
        """Build an ASTConfig from the stored hyper-parameters."""
        return ASTConfig(
            patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )

    def lowerCAmelCase( self : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int] ):
        """Instantiate ASTModel, run a forward pass, and check the output hidden-state shape."""
        snake_case : str = ASTModel(config=UpperCAmelCase__ )
        model.to(UpperCAmelCase__ )
        model.eval()
        snake_case : Any = model(UpperCAmelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def lowerCAmelCase( self : Optional[Any] ):
        """Return (config, inputs_dict) in the shape the common model tests expect."""
        snake_case : int = self.prepare_config_and_inputs()
        (
            snake_case
        ) : int = config_and_inputs
        snake_case : Tuple = {'''input_values''': input_values}
        return config, inputs_dict
@require_torch
class a_ ( a , a , unittest.TestCase ):
    """Common ModelTester/Pipeline test-suite wiring for the AST models.

    NOTE(review): machine-mangled -- every class attribute is named ``A__``
    (each assignment overwrites the previous one), all methods are named
    ``lowerCAmelCase``, and several bodies read names that were never bound
    (e.g. ``config``/``inputs_dict``/``model`` after assigning to
    ``snake_case``). Docstrings describe apparent upstream intent only.
    """

    # Models under test (empty tuple when torch is unavailable).
    A__ : List[Any] = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-task -> model-class mapping used by PipelineTesterMixin.
    A__ : int = (
        {'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel}
        if is_torch_available()
        else {}
    )
    # AST consumes raw spectrogram features, so the text/embedding-oriented
    # common tests are disabled via these flags.
    A__ : Optional[Any] = False
    A__ : Dict = False
    A__ : int = False
    A__ : Optional[int] = False

    def lowerCAmelCase( self : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int ):
        """Return True to skip the audio-classification pipeline test case."""
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False

    def lowerCAmelCase( self : Optional[Any] ):
        """Set up the model tester and the ASTConfig tester."""
        snake_case : Optional[int] = ASTModelTester(self )
        snake_case : Optional[int] = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37 )

    def lowerCAmelCase( self : List[str] ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''AST does not use inputs_embeds''' )
    def lowerCAmelCase( self : Tuple ):
        """Skipped: AST consumes spectrograms, not token embeddings."""
        pass

    def lowerCAmelCase( self : Dict ):
        """Input embeddings must be an nn.Module; output embeddings absent or nn.Linear."""
        snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case : Optional[Any] = model_class(UpperCAmelCase__ )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            snake_case : Any = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(UpperCAmelCase__ , nn.Linear ) )

    def lowerCAmelCase( self : Dict ):
        """forward() must take ``input_values`` as its first argument."""
        snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case : Any = model_class(UpperCAmelCase__ )
            snake_case : Any = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            snake_case : str = [*signature.parameters.keys()]
            snake_case : List[str] = ['''input_values''']
            self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )

    def lowerCAmelCase( self : Dict ):
        """Forward-pass shape check delegated to the model tester."""
        snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCAmelCase__ )

    @slow
    def lowerCAmelCase( self : List[str] ):
        """Smoke-test loading the first pretrained AST checkpoint from the hub."""
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            snake_case : List[str] = ASTModel.from_pretrained(UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
def a_ ( ) -> Dict:
    """Download a sample FLAC clip from the hub and return ``(waveform, sampling_rate)``.

    Fixes to the machine-mangled original: the download path was bound to
    ``snake_case`` while the undefined ``__magic_name__`` was passed to
    ``torchaudio.load``, and the two returned names were never bound. This
    restores the obvious data flow; the downloaded file and return contract
    are unchanged.
    """
    # hf_hub_download caches the file locally and returns its path.
    filepath = hf_hub_download(
        repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' )
    # torchaudio.load returns a (Tensor, int sample_rate) pair.
    audio, sampling_rate = torchaudio.load(filepath )
    return audio, sampling_rate
@require_torch
@require_torchaudio
class a_ ( unittest.TestCase ):
    """Slow integration test: AudioSet-finetuned AST checkpoint on a real audio clip.

    NOTE(review): machine-mangled -- the property and the test share the name
    ``lowerCAmelCase``, and the test binds results to ``snake_case`` while then
    reading ``model``/``feature_extractor``/``audio``/``outputs`` etc., which
    are never defined; docstrings describe apparent upstream intent only.
    """

    @cached_property
    def lowerCAmelCase( self : Any ):
        """Return the AudioSet-finetuned feature extractor, or None without torchaudio."""
        return (
            ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' )
            if is_torchaudio_available()
            else None
        )

    @slow
    def lowerCAmelCase( self : Tuple ):
        """Run a forward pass on the sample clip and check the logits shape and first values."""
        snake_case : List[str] = self.default_feature_extractor
        snake_case : str = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(UpperCAmelCase__ )
        snake_case : str = self.default_feature_extractor
        snake_case : int = prepare_audio()
        snake_case : Optional[int] = audio.squeeze().numpy()
        snake_case : Optional[Any] = feature_extractor(UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , return_tensors='''pt''' ).to(UpperCAmelCase__ )
        # forward pass
        with torch.no_grad():
            snake_case : Union[str, Any] = model(**UpperCAmelCase__ )
        # verify the logits
        snake_case : Any = torch.Size((1, 527) )
        self.assertEqual(outputs.logits.shape , UpperCAmelCase__ )
        snake_case : str = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(UpperCAmelCase__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4 ) )
| 714 |
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
_a : Dict = logging.get_logger(__name__)
def a_ ( box , width , height ) -> List[int]:
    """Scale an absolute pixel box ``[x0, y0, x1, y1]`` to the 0-1000 space used by LayoutLM.

    x-coordinates are divided by ``width`` and y-coordinates by ``height``,
    multiplied by 1000 and truncated to int. The original declared all three
    parameters as ``__magic_name__`` (duplicate parameter names are a
    SyntaxError); names are restored from how each value is used, and the
    return annotation is tightened to ``List[int]``.
    """
    return [
        int(1_000 * (box[0] / width) ),
        int(1_000 * (box[1] / height) ),
        int(1_000 * (box[2] / width) ),
        int(1_000 * (box[3] / height) ),
    ]
def a_ ( image , lang , tesseract_config = None ) -> "tuple[list, list]":
    """Run Tesseract OCR on ``image`` and return ``(words, normalized_boxes)``.

    Fixes to the machine-mangled original: the three parameters were all named
    ``__magic_name__`` (duplicate parameter names are a SyntaxError), two
    tuple-unpacking assignments carried illegal annotations, and locals were
    bound to ``snake_case`` but read back under other names. Boxes are
    returned in (left, top, right, bottom) form, scaled to 0-1000 per side by
    ``normalize_box`` (the helper defined just above, which this file's
    mangling renamed to ``a_`` -- NOTE(review): that call is unresolved here).
    """
    tesseract_config = tesseract_config if tesseract_config is not None else ''''''
    # apply OCR
    pil_image = to_pil_image(image )
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image , lang=lang , output_type='''dict''' , config=tesseract_config )
    words, left, top, width, height = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
    # filter empty words and corresponding coordinates; a set makes the
    # membership tests below O(1) instead of O(n) per element
    irrelevant_indices = {idx for idx, word in enumerate(words ) if not word.strip()}
    words = [word for idx, word in enumerate(words ) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left ) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top ) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width ) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height ) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left , top , width , height ):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box )
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box , image_width , image_height ) )
    assert len(words ) == len(normalized_boxes ), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class a_ ( a ):
    """LayoutLM-style document image processor: optional Tesseract OCR, resize, RGB->BGR flip.

    NOTE(review): machine-mangled like the rest of the file -- methods declare
    duplicate ``UpperCAmelCase__`` parameter names (a SyntaxError) and bind
    values to ``snake_case`` while reading them back under intended names
    (``size``, ``images``, ...). Docstrings describe apparent upstream intent.
    """

    # Keys produced by the preprocess output.
    A__ : int = ['pixel_values']

    def __init__( self : Optional[int] , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional[str] = "" , **UpperCAmelCase__ : int , ):
        """Store resize/OCR defaults (target size defaults to 224x224)."""
        super().__init__(**UpperCAmelCase__ )
        snake_case : Any = size if size is not None else {'''height''': 224, '''width''': 224}
        snake_case : Tuple = get_size_dict(UpperCAmelCase__ )
        snake_case : Dict = do_resize
        snake_case : str = size
        snake_case : Optional[int] = resample
        snake_case : Union[str, Any] = apply_ocr
        snake_case : int = ocr_lang
        snake_case : Union[str, Any] = tesseract_config

    def lowerCAmelCase( self : List[str] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Dict[str, int] , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Any , ):
        """Resize an image to ``size['height'] x size['width']``; raises ValueError on a malformed size dict."""
        snake_case : Dict = get_size_dict(UpperCAmelCase__ )
        if "height" not in size or "width" not in size:
            raise ValueError(F"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}" )
        snake_case : Tuple = (size['''height'''], size['''width'''])
        return resize(UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )

    def lowerCAmelCase( self : Tuple , UpperCAmelCase__ : ImageInput , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : PILImageResampling = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase__ : List[Any] , ):
        """Preprocess a batch: validate, optionally OCR, resize, flip channels, and package.

        Per-call arguments fall back to the instance defaults; OCR output
        (words and normalized boxes) is attached to the returned BatchFeature
        when ``apply_ocr`` is true.
        """
        snake_case : Tuple = do_resize if do_resize is not None else self.do_resize
        snake_case : List[Any] = size if size is not None else self.size
        snake_case : Tuple = get_size_dict(UpperCAmelCase__ )
        snake_case : str = resample if resample is not None else self.resample
        snake_case : Optional[int] = apply_ocr if apply_ocr is not None else self.apply_ocr
        snake_case : Any = ocr_lang if ocr_lang is not None else self.ocr_lang
        snake_case : Optional[int] = tesseract_config if tesseract_config is not None else self.tesseract_config
        snake_case : List[str] = make_list_of_images(UpperCAmelCase__ )
        if not valid_images(UpperCAmelCase__ ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )
        # All transformations expect numpy arrays.
        snake_case : Any = [to_numpy_array(UpperCAmelCase__ ) for image in images]
        if apply_ocr:
            requires_backends(self , '''pytesseract''' )
            snake_case : Optional[int] = []
            snake_case : Union[str, Any] = []
            for image in images:
                snake_case , snake_case : List[Any] = apply_tesseract(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
                words_batch.append(UpperCAmelCase__ )
                boxes_batch.append(UpperCAmelCase__ )
        if do_resize:
            snake_case : Any = [self.resize(image=UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ ) for image in images]
        # flip color channels from RGB to BGR (as Detectron2 requires this)
        snake_case : int = [flip_channel_order(UpperCAmelCase__ ) for image in images]
        snake_case : Optional[Any] = [to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__ ) for image in images]
        snake_case : List[Any] = BatchFeature(data={'''pixel_values''': images} , tensor_type=UpperCAmelCase__ )
        if apply_ocr:
            snake_case : Dict = words_batch
            snake_case : Dict = boxes_batch
        return data
| 84 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# NOTE(review): upstream this module builds an ``_import_structure`` dict and
# adds per-backend entries to it; here every assignment was mangled to the
# same name ``_a``, so each successful branch silently overwrites the previous
# one and ``_import_structure`` (referenced at the bottom of the file) is
# never defined.
_a : Union[str, Any] = {'configuration_mbart': ['MBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MBartConfig', 'MBartOnnxConfig']}
# Slow (sentencepiece-backed) tokenizer -- only registered when the optional
# dependency is installed.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _a : int = ['MBartTokenizer']
# Fast (tokenizers-backed) tokenizer.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _a : List[str] = ['MBartTokenizerFast']
# PyTorch model classes.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _a : Optional[int] = [
        'MBART_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MBartForCausalLM',
        'MBartForConditionalGeneration',
        'MBartForQuestionAnswering',
        'MBartForSequenceClassification',
        'MBartModel',
        'MBartPreTrainedModel',
    ]
# TensorFlow model classes.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _a : int = [
        'TFMBartForConditionalGeneration',
        'TFMBartModel',
        'TFMBartPreTrainedModel',
    ]
# Flax model classes.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _a : Optional[int] = [
        'FlaxMBartForConditionalGeneration',
        'FlaxMBartForQuestionAnswering',
        'FlaxMBartForSequenceClassification',
        'FlaxMBartModel',
        'FlaxMBartPreTrainedModel',
    ]
# Static type-checkers import everything eagerly; at runtime the module is
# meant to be replaced by a lazy loader that defers the heavy imports.
if TYPE_CHECKING:
    from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart import MBartTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart_fast import MBartTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mbart import (
            MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            MBartForCausalLM,
            MBartForConditionalGeneration,
            MBartForQuestionAnswering,
            MBartForSequenceClassification,
            MBartModel,
            MBartPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mbart import (
            FlaxMBartForConditionalGeneration,
            FlaxMBartForQuestionAnswering,
            FlaxMBartForSequenceClassification,
            FlaxMBartModel,
            FlaxMBartPreTrainedModel,
        )
else:
    import sys

    # NOTE(review): upstream assigns ``sys.modules[__name__] = _LazyModule(...)``;
    # binding the loader to ``_a`` (and passing the undefined
    # ``_import_structure``) means the lazy module is never actually installed
    # -- machine-mangled.
    _a : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 715 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class a_ :
    """Builds tiny ASTConfig objects and random spectrogram inputs for the AST model tests.

    NOTE(review): machine-mangled (second near-duplicate copy in this file) --
    ``__init__`` declares every parameter as ``UpperCAmelCase__`` (duplicate
    parameter names are a SyntaxError), all methods share the name
    ``lowerCAmelCase``, values are bound to ``snake_case`` but read back under
    intended names, and the annotated tuple-unpacking target in the last
    method is itself a SyntaxError. Docstrings describe intent only.
    """

    def __init__( self : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any]=13 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : List[Any]=24 , UpperCAmelCase__ : Union[str, Any]=16 , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : str=True , UpperCAmelCase__ : int=32 , UpperCAmelCase__ : Tuple=5 , UpperCAmelCase__ : Dict=4 , UpperCAmelCase__ : Optional[int]=37 , UpperCAmelCase__ : Optional[int]="gelu" , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Optional[int]=10 , UpperCAmelCase__ : Any=0.02 , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : List[Any]=2 , UpperCAmelCase__ : Optional[Any]=2 , ):
        """Store the test hyper-parameters and derive the expected patch sequence length."""
        snake_case : Tuple = parent
        snake_case : Dict = batch_size
        snake_case : str = patch_size
        snake_case : Union[str, Any] = max_length
        snake_case : str = num_mel_bins
        snake_case : Any = is_training
        snake_case : Union[str, Any] = use_labels
        snake_case : Tuple = hidden_size
        snake_case : Dict = num_hidden_layers
        snake_case : Any = num_attention_heads
        snake_case : Any = intermediate_size
        snake_case : List[Any] = hidden_act
        snake_case : str = hidden_dropout_prob
        snake_case : str = attention_probs_dropout_prob
        snake_case : str = type_sequence_label_size
        snake_case : Optional[int] = initializer_range
        snake_case : str = scope
        snake_case : int = frequency_stride
        snake_case : Union[str, Any] = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        snake_case : Optional[int] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        snake_case : Any = (self.max_length - self.patch_size) // self.time_stride + 1
        snake_case : Union[str, Any] = frequency_out_dimension * time_out_dimension
        snake_case : Union[str, Any] = num_patches + 2

    def lowerCAmelCase( self : Union[str, Any] ):
        """Create random spectrogram inputs (and labels when use_labels) plus a config."""
        snake_case : Optional[int] = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
        snake_case : str = None
        if self.use_labels:
            snake_case : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        snake_case : List[str] = self.get_config()
        return config, input_values, labels

    def lowerCAmelCase( self : Any ):
        """Build an ASTConfig from the stored hyper-parameters."""
        return ASTConfig(
            patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )

    def lowerCAmelCase( self : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int] ):
        """Instantiate ASTModel, run a forward pass, and check the output hidden-state shape."""
        snake_case : str = ASTModel(config=UpperCAmelCase__ )
        model.to(UpperCAmelCase__ )
        model.eval()
        snake_case : Any = model(UpperCAmelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def lowerCAmelCase( self : Optional[Any] ):
        """Return (config, inputs_dict) in the shape the common model tests expect."""
        snake_case : int = self.prepare_config_and_inputs()
        (
            (
                snake_case
            ) , (
                snake_case
            ) , (
                snake_case
            ) ,
        ) : int = config_and_inputs
        snake_case : Tuple = {'''input_values''': input_values}
        return config, inputs_dict
@require_torch
class a_ ( a , a , unittest.TestCase ):
    """Common ModelTester/Pipeline test-suite wiring for the AST models.

    NOTE(review): machine-mangled (second near-duplicate copy in this file) --
    all class attributes are named ``A__``, all methods ``lowerCAmelCase``, and
    the two-target annotated assignments below (``snake_case , snake_case :
    ... = ...``) are SyntaxErrors. Docstrings describe apparent intent only.
    """

    # Models under test (empty tuple when torch is unavailable).
    A__ : List[Any] = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-task -> model-class mapping used by PipelineTesterMixin.
    A__ : int = (
        {'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel}
        if is_torch_available()
        else {}
    )
    # Flags disabling the text/embedding-oriented common tests.
    A__ : Optional[Any] = False
    A__ : Dict = False
    A__ : int = False
    A__ : Optional[int] = False

    def lowerCAmelCase( self : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int ):
        """Return True to skip the audio-classification pipeline test case."""
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False

    def lowerCAmelCase( self : Optional[Any] ):
        """Set up the model tester and the ASTConfig tester."""
        snake_case : Optional[int] = ASTModelTester(self )
        snake_case : Optional[int] = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37 )

    def lowerCAmelCase( self : List[str] ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''AST does not use inputs_embeds''' )
    def lowerCAmelCase( self : Tuple ):
        """Skipped: AST consumes spectrograms, not token embeddings."""
        pass

    def lowerCAmelCase( self : Dict ):
        """Input embeddings must be an nn.Module; output embeddings absent or nn.Linear."""
        snake_case , snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case : Optional[Any] = model_class(UpperCAmelCase__ )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            snake_case : Any = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(UpperCAmelCase__ , nn.Linear ) )

    def lowerCAmelCase( self : Dict ):
        """forward() must take ``input_values`` as its first argument."""
        snake_case , snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case : Any = model_class(UpperCAmelCase__ )
            snake_case : Any = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            snake_case : str = [*signature.parameters.keys()]
            snake_case : List[str] = ['''input_values''']
            self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )

    def lowerCAmelCase( self : Dict ):
        """Forward-pass shape check delegated to the model tester."""
        snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCAmelCase__ )

    @slow
    def lowerCAmelCase( self : List[str] ):
        """Smoke-test loading the first pretrained AST checkpoint from the hub."""
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            snake_case : List[str] = ASTModel.from_pretrained(UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
def a_ ( ) -> Dict:
    """Download a sample FLAC clip from the hub and return ``(waveform, sampling_rate)``.

    Fixes to the machine-mangled original: the annotated tuple-unpacking
    assignment ``snake_case , snake_case : int = torchaudio.load(...)`` is a
    SyntaxError, the download path was passed as the undefined
    ``__magic_name__``, and the two returned names were never bound. This
    restores the obvious data flow; the downloaded file and the return
    contract are unchanged.
    """
    # hf_hub_download caches the file locally and returns its path.
    filepath = hf_hub_download(
        repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' )
    # torchaudio.load returns a (Tensor, int sample_rate) pair.
    audio, sampling_rate = torchaudio.load(filepath )
    return audio, sampling_rate
@require_torch
@require_torchaudio
class a_ ( unittest.TestCase ):
    """Slow integration test: AudioSet-finetuned AST checkpoint on a real audio clip.

    NOTE(review): machine-mangled (second near-duplicate copy) -- results are
    bound to ``snake_case`` while the code then reads ``model``/``audio``/
    ``outputs`` etc. (never defined), and ``snake_case , snake_case : int =
    prepare_audio()`` is a SyntaxError. Docstrings describe intent only.
    """

    @cached_property
    def lowerCAmelCase( self : Any ):
        """Return the AudioSet-finetuned feature extractor, or None without torchaudio."""
        return (
            ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' )
            if is_torchaudio_available()
            else None
        )

    @slow
    def lowerCAmelCase( self : Tuple ):
        """Run a forward pass on the sample clip and check the logits shape and first values."""
        snake_case : List[str] = self.default_feature_extractor
        snake_case : str = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(UpperCAmelCase__ )
        snake_case : str = self.default_feature_extractor
        snake_case , snake_case : int = prepare_audio()
        snake_case : Optional[int] = audio.squeeze().numpy()
        snake_case : Optional[Any] = feature_extractor(UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , return_tensors='''pt''' ).to(UpperCAmelCase__ )
        # forward pass
        with torch.no_grad():
            snake_case : Union[str, Any] = model(**UpperCAmelCase__ )
        # verify the logits
        snake_case : Any = torch.Size((1, 527) )
        self.assertEqual(outputs.logits.shape , UpperCAmelCase__ )
        snake_case : str = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(UpperCAmelCase__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4 ) )
| 84 | 0 |
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    '''files''' , [
        ['''full:README.md''', '''dataset_infos.json'''],
        ['''empty:README.md''', '''dataset_infos.json'''],
        ['''dataset_infos.json'''],
        ['''full:README.md'''],
    ] , )
def a_ ( files , tmp_path_factory ):
    """DatasetInfosDict.from_directory reads dataset_size from README.md YAML or legacy dataset_infos.json.

    Fixes to the machine-mangled original: both parameters were named
    ``__magic_name__`` (duplicate parameter names are a SyntaxError) and the
    temp directory was bound to ``snake_case`` but read as other names. Names
    are restored from the parametrize id (``files``) and the pytest fixture
    (``tmp_path_factory``) the body relies on; the NameError-prone
    ``-> Union[...]`` annotation (``Union`` is not imported here) is dropped.
    """
    # Directory populated with whichever metadata files this case requests.
    dataset_infos_dir = tmp_path_factory.mktemp('''dset_infos_dir''' )
    if "full:README.md" in files:
        with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f:
            f.write('''---\ndataset_info:\n dataset_size: 42\n---''' )
    if "empty:README.md" in files:
        with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f:
            f.write('''''' )
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / '''dataset_infos.json''' , '''w''' ) as f:
            f.write('''{"default": {"dataset_size": 42}}''' )
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir )
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
    '''dataset_info''' , [
        DatasetInfo(),
        DatasetInfo(
            description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , ),
    ] , )
def a_ ( dataset_info , tmp_path ):
    """DatasetInfo round-trips through write_to_directory/from_directory and emits dataset_info.json.

    Fixes to the machine-mangled original: both parameters were named
    ``__magic_name__`` (duplicate parameter names are a SyntaxError) and locals
    were bound to ``snake_case`` but read as other names. Names are restored
    from the parametrize id (``dataset_info``) and the pytest ``tmp_path``
    fixture; the NameError-prone ``-> List[...]`` annotation is dropped.
    """
    tmp_path_str = str(tmp_path )
    dataset_info.write_to_directory(tmp_path_str )
    reloaded = DatasetInfo.from_directory(tmp_path_str )
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path_str , '''dataset_info.json''' ) )
def test_dataset_info_to_yaml_dict():
    """_to_yaml_dict must expose exactly the YAML-included fields, all YAML-serializable."""
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1_337,
        post_processing_size=442,
        dataset_size=1_234,
        size_in_bytes=1_337 + 442 + 1_234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        # only plain YAML types may appear, so the dump below cannot lose information
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty():
    """An all-default DatasetInfo serializes to an empty YAML dict."""
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
    "dataset_infos_dict",
    [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()}),
        DatasetInfosDict({"my_config_name": DatasetInfo()}),
        DatasetInfosDict(
            {
                "default": DatasetInfo(
                    description="foo",
                    features=Features({"a": Value("int32")}),
                    builder_name="builder",
                    config_name="config",
                    version="1.0.0",
                    splits=[{"name": "train"}],
                    download_size=42,
                )
            }
        ),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=42),
                "v2": DatasetInfo(dataset_size=1_337),
            }
        ),
    ],
)
def test_dataset_infos_dict_dump_and_reload(dataset_infos_dict, tmp_path):
    """Round-trip a DatasetInfosDict through write_to_directory / from_directory."""
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)

    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded

    if dataset_infos_dict:
        # non-empty infos are persisted in the README.md YAML header
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
| 716 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_a : Union[str, Any] = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    """Return the fraction of predictions that exactly match the labels (numpy arrays)."""
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    """Fine-tune and evaluate a multiple-choice model.

    Parses model/data/training arguments from the command line, builds the
    model, tokenizer and datasets, runs training and/or evaluation, and
    returns the dict of evaluation results (empty when --do_eval is off).
    """
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Refuse to clobber a non-empty output directory unless explicitly allowed.
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        # The model outputs one logit per choice; the prediction is the argmax choice.
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator: pad to multiples of 8 for fp16 tensor-core efficiency.
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info(" %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)

    return results
def a_ ( __magic_name__ ) -> None:
    """Entry point for spawned workers (e.g. TPU process spawning): receives the
    process index as its single positional argument and simply runs ``main()``.
    The index argument itself is unused."""
    main()
if __name__ == "__main__":
    main()
| 84 | 0 |
import os
def solution() -> str:
    """Return the first ten digits of the sum of the numbers listed one per line
    in the ``num.txt`` file that sits next to this script.
    """
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
| 717 |
import re
def a_ ( __magic_name__ ) -> bool:
"""simple docstring"""
snake_case : List[str] = re.compile(
R'''^(?:0|94|\+94|0{2}94)''' R'''7(0|1|2|4|5|6|7|8)''' R'''(-| |)''' R'''\d{7}$''' )
return bool(re.search(__magic_name__ , __magic_name__ ) )
if __name__ == "__main__":
_a : Any = '0094702343221'
print(is_sri_lankan_phone_number(phone))
| 84 | 0 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 718 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    """Holds the shared configuration used to build LayoutLMv3 image processors in the tests below."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate an image processor under test."""
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for the LayoutLMv3 image processor: attribute handling, PIL/numpy/torch
    batching behavior, and an OCR integration test against fixed Tesseract output."""

    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        # kwargs passed to from_dict override the serialized values
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # apply_ocr=True also returns the recognized words and their boxes
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_LayoutLMv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        expected_words = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']]  # noqa: E231
        expected_boxes = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]]  # noqa: E231
        # fmt: on

        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
| 84 | 0 |
import math
def prime_sieve(n: int) -> list:
    """Return all primes strictly below ``n`` (n must be >= 3), via a sieve of
    Eratosthenes that only scans odd candidates.

    >>> prime_sieve(10)
    [2, 3, 5, 7]
    """
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        # mark every multiple of i (even ones too, but those are never read)
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999_966_663_333) -> int:
    """Project Euler 234: sum all "semidivisible" numbers no larger than ``limit``.

    A number N is semidivisible when exactly one of lps(N) (largest prime <= sqrt(N))
    and ups(N) (smallest prime >= sqrt(N)) divides N. For each pair of consecutive
    primes (p, q), the candidates lie in the open interval (p**2, q**2): multiples
    of p and multiples of q are added, and multiples of both are subtracted twice
    (once for each earlier addition).
    """
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum


if __name__ == "__main__":
    print(solution())
| 719 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_a : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
_a : Dict = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... 
prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    """Map a pixel height/width to the matching latent-grid size.

    Each dimension is ceil-divided by ``scale_factor**2`` and then re-multiplied by
    ``scale_factor``, so the result is the smallest multiple of ``scale_factor`` whose
    upscaled size covers the requested dimension. Returns ``(new_height, new_width)``.
    """
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class a_ ( a ):
def __init__( self : Optional[int] , UpperCAmelCase__ : UNetaDConditionModel , UpperCAmelCase__ : DDPMScheduler , UpperCAmelCase__ : VQModel , ):
"""simple docstring"""
super().__init__()
self.register_modules(
unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ , movq=UpperCAmelCase__ , )
snake_case : List[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCAmelCase( self : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any ):
"""simple docstring"""
if latents is None:
snake_case : int = randn_tensor(UpperCAmelCase__ , generator=UpperCAmelCase__ , device=UpperCAmelCase__ , dtype=UpperCAmelCase__ )
else:
if latents.shape != shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}" )
snake_case : Optional[Any] = latents.to(UpperCAmelCase__ )
snake_case : List[Any] = latents * scheduler.init_noise_sigma
return latents
def lowerCAmelCase( self : Dict , UpperCAmelCase__ : Optional[int]=0 ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
snake_case : Union[str, Any] = torch.device(F"cuda:{gpu_id}" )
snake_case : Dict = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCAmelCase( self : List[Any] , UpperCAmelCase__ : Any=0 ):
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
snake_case : Optional[int] = torch.device(F"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=UpperCAmelCase__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
snake_case : List[str] = None
for cpu_offloaded_model in [self.unet, self.movq]:
snake_case , snake_case : Optional[int] = cpu_offload_with_hook(UpperCAmelCase__ , UpperCAmelCase__ , prev_module_hook=UpperCAmelCase__ )
# We'll offload the last model manually.
snake_case : Tuple = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase( self : Union[str, Any] ):
"""simple docstring"""
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(UpperCAmelCase__ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
    @torch.no_grad()
    @replace_example_docstring(UpperCAmelCase__ )
    def __call__( self : List[str] , UpperCAmelCase__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCAmelCase__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCAmelCase__ : torch.FloatTensor , UpperCAmelCase__ : int = 512 , UpperCAmelCase__ : int = 512 , UpperCAmelCase__ : int = 100 , UpperCAmelCase__ : float = 4.0 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase__ : Optional[torch.FloatTensor] = None , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , ):
        """Run the hint-conditioned (controlnet-style) denoising loop.

        NOTE(review): identifiers in this method were machine-mangled — the
        signature repeats the placeholder name `UpperCAmelCase__` (a SyntaxError
        in Python), every local is bound to `snake_case`, and the body then reads
        names that are never defined (`guidance_scale`, `batch_size`, `hint`,
        `image_embeds`, `latents`, `output_type`, `return_dict`, ...). The
        original parameter names need to be restored before this can run;
        comments below describe the evident intent of each section.
        """
        snake_case : Optional[int] = self._execution_device
        # Classifier-free guidance is enabled when the guidance scale exceeds 1.
        snake_case : Union[str, Any] = guidance_scale > 1.0
        # Lists of embeddings/hints are concatenated into single batched tensors.
        if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
            snake_case : Any = torch.cat(UpperCAmelCase__ , dim=0 )
        if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
            snake_case : Union[str, Any] = torch.cat(UpperCAmelCase__ , dim=0 )
        if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
            snake_case : int = torch.cat(UpperCAmelCase__ , dim=0 )
        # Effective batch size accounts for multiple images per prompt.
        snake_case : List[Any] = image_embeds.shape[0] * num_images_per_prompt
        if do_classifier_free_guidance:
            # Duplicate conditional/unconditional inputs for the guided pass.
            snake_case : Dict = image_embeds.repeat_interleave(UpperCAmelCase__ , dim=0 )
            snake_case : Optional[Any] = negative_image_embeds.repeat_interleave(UpperCAmelCase__ , dim=0 )
            snake_case : Tuple = hint.repeat_interleave(UpperCAmelCase__ , dim=0 )
            snake_case : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCAmelCase__ )
            snake_case : List[str] = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCAmelCase__ )
        self.scheduler.set_timesteps(UpperCAmelCase__ , device=UpperCAmelCase__ )
        snake_case : str = self.scheduler.timesteps
        snake_case : Optional[Any] = self.movq.config.latent_channels
        # Latent spatial dims derive from requested height/width and the movq scale factor.
        snake_case , snake_case : Optional[Any] = downscale_height_and_width(UpperCAmelCase__ , UpperCAmelCase__ , self.movq_scale_factor )
        # create initial latent
        snake_case : Dict = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , self.scheduler , )
        for i, t in enumerate(self.progress_bar(UpperCAmelCase__ ) ):
            # expand the latents if we are doing classifier free guidance
            snake_case : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            snake_case : Optional[int] = {'''image_embeds''': image_embeds, '''hint''': hint}
            snake_case : Any = self.unet(
                sample=UpperCAmelCase__ , timestep=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , added_cond_kwargs=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , )[0]
            if do_classifier_free_guidance:
                # Split predicted noise from predicted variance, then blend
                # unconditional/conditional noise with the guidance scale.
                snake_case , snake_case : Dict = noise_pred.split(latents.shape[1] , dim=1 )
                snake_case , snake_case : Any = noise_pred.chunk(2 )
                snake_case , snake_case : Dict = variance_pred.chunk(2 )
                snake_case : str = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                snake_case : List[str] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , '''variance_type''' )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                # Schedulers that do not consume a learned variance only take the noise part.
                snake_case , snake_case : Tuple = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            snake_case : List[Any] = self.scheduler.step(
                UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , generator=UpperCAmelCase__ , )[0]
        # post-processing: decode latents back to pixel space with movq
        snake_case : List[Any] = self.movq.decode(UpperCAmelCase__ , force_not_quantize=UpperCAmelCase__ )['''sample''']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
        if output_type in ["np", "pil"]:
            # Map [-1, 1] model output to [0, 1] then to HWC numpy.
            snake_case : Optional[Any] = image * 0.5 + 0.5
            snake_case : int = image.clamp(0 , 1 )
            snake_case : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
            if output_type == "pil":
                snake_case : str = self.numpy_to_pil(UpperCAmelCase__ )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=UpperCAmelCase__ )
| 84 | 0 |
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def a_ ( __magic_name__ ) -> str:
    """Map a timm/MAE checkpoint key onto the HF ViTMAE naming scheme.

    Substitutions are applied in order; an entry with a guard substring is
    skipped when that substring occurs in the current key (e.g. the generic
    ``pos_embed`` rule must not touch decoder keys).

    Fix: the original body read and wrote a variable ``name`` that was never
    defined (the parameter had been mangled), so every call raised NameError.

    Args:
        __magic_name__: original checkpoint key.

    Returns:
        The renamed key.
    """
    # (needle, replacement, guard) — guard=None means unconditional.
    rules = (
        ('''cls_token''', '''vit.embeddings.cls_token''', None),
        ('''mask_token''', '''decoder.mask_token''', None),
        ('''decoder_pos_embed''', '''decoder.decoder_pos_embed''', None),
        ('''pos_embed''', '''vit.embeddings.position_embeddings''', '''decoder'''),
        ('''patch_embed.proj''', '''vit.embeddings.patch_embeddings.projection''', None),
        ('''patch_embed.norm''', '''vit.embeddings.norm''', None),
        ('''decoder_blocks''', '''decoder.decoder_layers''', None),
        ('''blocks''', '''vit.encoder.layer''', None),
        ('''attn.proj''', '''attention.output.dense''', None),
        ('''attn''', '''attention.self''', None),
        ('''norm1''', '''layernorm_before''', None),
        ('''norm2''', '''layernorm_after''', None),
        ('''mlp.fc1''', '''intermediate.dense''', None),
        ('''mlp.fc2''', '''output.dense''', None),
        ('''decoder_embed''', '''decoder.decoder_embed''', None),
        ('''decoder_norm''', '''decoder.decoder_norm''', None),
        ('''decoder_pred''', '''decoder.decoder_pred''', None),
        ('''norm.weight''', '''vit.layernorm.weight''', '''decoder'''),
        ('''norm.bias''', '''vit.layernorm.bias''', '''decoder'''),
    )
    name = __magic_name__
    for needle, replacement, guard in rules:
        if needle in name and (guard is None or guard not in name):
            name = name.replace(needle , replacement )
    return name
def a_ ( __magic_name__ , __magic_name__ ) -> str:
    """Rewrite a raw MAE state dict to HF layout, splitting fused qkv weights.

    NOTE(review): this function is machine-mangled. The two parameters share
    the placeholder name ``__magic_name__`` (duplicate argument — a
    SyntaxError); originally they were presumably ``orig_state_dict`` and
    ``config`` — confirm against the upstream converter. Every
    ``snake_case : ... = val[...]`` line below was originally a write into
    ``orig_state_dict`` under a q/k/v key; those destination keys were lost by
    the mangling, so as written the slices are computed and discarded.
    """
    for key in orig_state_dict.copy().keys():
        # Pop the original entry; it is re-inserted under the new name(s).
        snake_case : Union[str, Any] = orig_state_dict.pop(__magic_name__ )
        if "qkv" in key:
            snake_case : Optional[int] = key.split('''.''' )
            snake_case : int = int(key_split[1] )
            if "decoder_blocks" in key:
                # Decoder layers use the (smaller) decoder hidden size.
                snake_case : List[str] = config.decoder_hidden_size
                snake_case : List[Any] = '''decoder.decoder_layers.'''
                if "weight" in key:
                    # Fused qkv weight is split into thirds: query / key / value.
                    snake_case : str = val[:dim, :]
                    snake_case : Optional[Any] = val[dim : dim * 2, :]
                    snake_case : Any = val[-dim:, :]
                elif "bias" in key:
                    snake_case : Optional[Any] = val[:dim]
                    snake_case : List[Any] = val[dim : dim * 2]
                    snake_case : List[Any] = val[-dim:]
            else:
                # Encoder layers use the main hidden size.
                snake_case : Optional[int] = config.hidden_size
                snake_case : Tuple = '''vit.encoder.layer.'''
                if "weight" in key:
                    snake_case : Optional[Any] = val[:dim, :]
                    snake_case : str = val[dim : dim * 2, :]
                    snake_case : Union[str, Any] = val[-dim:, :]
                elif "bias" in key:
                    snake_case : Tuple = val[:dim]
                    snake_case : int = val[dim : dim * 2]
                    snake_case : Optional[Any] = val[-dim:]
        else:
            # Non-qkv tensors are carried over (under the renamed key, originally).
            snake_case : Optional[Any] = val
    return orig_state_dict
def a_ ( __magic_name__ , __magic_name__ ) -> Any:
    """Download a ViT-MAE checkpoint, convert it to HF format, verify, and save.

    NOTE(review): machine-mangled. The duplicated ``__magic_name__`` parameters
    (a SyntaxError) were presumably ``checkpoint_url`` and
    ``pytorch_dump_folder_path``; config-size assignments below all bind the
    placeholder ``snake_case``, so the intended config fields
    (hidden_size / intermediate_size / num_hidden_layers / num_attention_heads,
    and patch_size for "huge") are not actually set — confirm against the
    upstream convert_vit_mae_to_pytorch script before relying on this.
    """
    snake_case : List[str] = ViTMAEConfig()
    if "large" in checkpoint_url:
        # large: 1024 hidden, 4096 intermediate, 24 layers, 16 heads
        snake_case : str = 1_024
        snake_case : Tuple = 4_096
        snake_case : Optional[Any] = 24
        snake_case : List[Any] = 16
    elif "huge" in checkpoint_url:
        # huge: patch 14, 1280 hidden, 5120 intermediate, 32 layers, 16 heads
        snake_case : Tuple = 14
        snake_case : int = 1_280
        snake_case : Dict = 5_120
        snake_case : Tuple = 32
        snake_case : Optional[Any] = 16
    snake_case : Optional[Any] = ViTMAEForPreTraining(__magic_name__ )
    # Fetch the original weights and remap them to the HF naming scheme.
    snake_case : Optional[Any] = torch.hub.load_state_dict_from_url(__magic_name__ , map_location='''cpu''' )['''model''']
    snake_case : int = ViTMAEImageProcessor(size=config.image_size )
    snake_case : Dict = convert_state_dict(__magic_name__ , __magic_name__ )
    model.load_state_dict(__magic_name__ )
    model.eval()
    # Sanity-check the conversion on a reference image.
    snake_case : Tuple = '''https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg'''
    snake_case : List[Any] = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw )
    snake_case : Dict = ViTMAEImageProcessor(size=config.image_size )
    snake_case : str = image_processor(images=__magic_name__ , return_tensors='''pt''' )
    # forward pass (seeded: ViTMAE masks patches randomly)
    torch.manual_seed(2 )
    snake_case : Union[str, Any] = model(**__magic_name__ )
    snake_case : Optional[Any] = outputs.logits
    if "large" in checkpoint_url:
        snake_case : Any = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]] )
    elif "huge" in checkpoint_url:
        snake_case : List[Any] = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]] )
    else:
        snake_case : Dict = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]] )
    # verify logits against the expected reference slice
    assert torch.allclose(logits[0, :3, :3] , __magic_name__ , atol=1e-4 )
    print(F"Saving model to {pytorch_dump_folder_path}" )
    model.save_pretrained(__magic_name__ )
    print(F"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(__magic_name__ )
if __name__ == "__main__":
    # CLI entry point for the ViT-MAE conversion.
    _a : Tuple = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--checkpoint_url',
        default='https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth',
        type=str,
        help='URL of the checkpoint you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    _a : str = parser.parse_args()
    # NOTE(review): the parser/args variables were mangled to `_a`, and the
    # conversion function above was renamed to `a_`, so both `parser` and
    # `convert_vit_mae_checkpoint` are undefined names here — restore the
    # original identifiers before running.
    convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 720 |
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_a : List[Any] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class a_ ( a , unittest.TestCase ):
    """Test suite for the Reformer slow/fast tokenizers.

    NOTE(review): identifiers in this file were machine-mangled — locals bind
    the placeholder ``snake_case`` and many call sites pass the undefined
    ``UpperCAmelCase__``; comments below describe the evident intent, but the
    original variable names must be restored before the tests can run.
    """

    # Tokenizer classes under test and harness flags for TokenizerTesterMixin.
    A__ : Dict = ReformerTokenizer
    A__ : Optional[int] = ReformerTokenizerFast
    A__ : str = True
    A__ : Tuple = False
    A__ : str = True
    def lowerCAmelCase( self : List[Any] ):
        """Build a slow tokenizer from the SentencePiece fixture and save it
        to the temp dir so the mixin can reload it."""
        super().setUp()
        snake_case : str = ReformerTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ )
        tokenizer.save_pretrained(self.tmpdirname )
    def lowerCAmelCase( self : Any ):
        """Check token<->id round-trip for the ``<s>`` special token (id 1)."""
        snake_case : int = '''<s>'''
        snake_case : List[Any] = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ )
    def lowerCAmelCase( self : Optional[Any] ):
        """Check vocab ordering (first/second/last keys) and size (1000)."""
        snake_case : Any = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''<unk>''' )
        self.assertEqual(vocab_keys[1] , '''<s>''' )
        self.assertEqual(vocab_keys[-1] , '''j''' )
        self.assertEqual(len(UpperCAmelCase__ ) , 1_000 )
    def lowerCAmelCase( self : List[Any] ):
        """Check the reported vocab size."""
        self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
    def lowerCAmelCase( self : Dict ):
        """Check that slow and fast tokenizers agree on tokenize/encode."""
        if not self.test_rust_tokenizer:
            return
        snake_case : Any = self.get_tokenizer()
        snake_case : str = self.get_rust_tokenizer()
        snake_case : Tuple = '''I was born in 92000, and this is falsé.'''
        snake_case : str = tokenizer.tokenize(UpperCAmelCase__ )
        snake_case : int = rust_tokenizer.tokenize(UpperCAmelCase__ )
        self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
        snake_case : Union[str, Any] = tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
        snake_case : List[str] = rust_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
        self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
        snake_case : List[str] = self.get_rust_tokenizer()
        snake_case : Optional[int] = tokenizer.encode(UpperCAmelCase__ )
        snake_case : Optional[Any] = rust_tokenizer.encode(UpperCAmelCase__ )
        self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
    def lowerCAmelCase( self : Dict , UpperCAmelCase__ : List[Any]=15 ):
        """Check that padding to max_length raises when no pad token is set
        (for single, pair, and batch inputs)."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                snake_case : str = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
                # Simple input
                snake_case : Union[str, Any] = '''This is a simple input'''
                snake_case : List[str] = ['''This is a simple input 1''', '''This is a simple input 2''']
                snake_case : int = ('''This is a simple input''', '''This is a pair''')
                snake_case : int = [
                    ('''This is a simple input 1''', '''This is a simple input 2'''),
                    ('''This is a simple pair 1''', '''This is a simple pair 2'''),
                ]
                # Simple input tests
                self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
                # Simple input
                self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
                # Simple input
                self.assertRaises(
                    UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' , )
                # Pair input
                self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
                # Pair input
                self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
                # Pair input
                self.assertRaises(
                    UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' , )
    def lowerCAmelCase( self : str ):
        """Intentionally skipped (placeholder override from the mixin)."""
        pass
    def lowerCAmelCase( self : Union[str, Any] ):
        """Check full tokenization: tokens, ids, and id->token round-trip
        (unknown pieces map to ``<unk>`` / id 0)."""
        snake_case : Union[str, Any] = ReformerTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ )
        snake_case : List[str] = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [285, 46, 10, 170, 382] , )
        snake_case : int = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            UpperCAmelCase__ , [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''9''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''é''',
                '''.''',
            ] , )
        snake_case : int = tokenizer.convert_tokens_to_ids(UpperCAmelCase__ )
        self.assertListEqual(
            UpperCAmelCase__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        snake_case : List[Any] = tokenizer.convert_ids_to_tokens(UpperCAmelCase__ )
        self.assertListEqual(
            UpperCAmelCase__ , [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''<unk>''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''<unk>''',
                '''.''',
            ] , )
    @cached_property
    def lowerCAmelCase( self : Tuple ):
        """Pretrained crime-and-punishment tokenizer, cached per test class."""
        return ReformerTokenizer.from_pretrained('''google/reformer-crime-and-punishment''' )
    @slow
    def lowerCAmelCase( self : List[str] ):
        """Check encoding of a short easy string against fixed ids."""
        snake_case : Any = '''Hello World!'''
        snake_case : Optional[Any] = [126, 32, 262, 152, 38, 72, 287]
        self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) )
    @slow
    def lowerCAmelCase( self : Optional[Any] ):
        """Check encoding of a long string with odd symbols against fixed ids
        (unknown symbols encode to id 0)."""
        snake_case : Optional[Any] = (
            '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
            ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
        )
        snake_case : Dict = [
            108,
            265,
            24,
            111,
            4,
            258,
            156,
            35,
            28,
            275,
            3,
            259,
            297,
            260,
            84,
            4,
            35,
            110,
            44,
            8,
            259,
            91,
            268,
            21,
            11,
            209,
            274,
            109,
            266,
            277,
            117,
            86,
            93,
            315,
            258,
            278,
            258,
            277,
            258,
            0,
            258,
            288,
            258,
            319,
            258,
            0,
            258,
            0,
            258,
            0,
            258,
            0,
            258,
            287,
            258,
            315,
            258,
            289,
            258,
            278,
            99,
            269,
            266,
            262,
            8,
            259,
            241,
            4,
            217,
            230,
            268,
            266,
            55,
            168,
            106,
            75,
            193,
            266,
            223,
            27,
            49,
            26,
            282,
            25,
            264,
            299,
            19,
            26,
            0,
            258,
            277,
            117,
            86,
            93,
            176,
            183,
            270,
            11,
            262,
            42,
            61,
            265,
        ]
        self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) )
    @require_torch
    @slow
    def lowerCAmelCase( self : List[Any] ):
        """Smoke test: feed encoded sequences through a small ReformerModel."""
        import torch
        from transformers import ReformerConfig, ReformerModel
        # Build sequence
        snake_case : Any = list(self.big_tokenizer.get_vocab().keys() )[:10]
        snake_case : Union[str, Any] = ''' '''.join(UpperCAmelCase__ )
        snake_case : Optional[int] = self.big_tokenizer.encode_plus(UpperCAmelCase__ , return_tensors='''pt''' )
        snake_case : List[str] = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors='''pt''' )
        snake_case : Optional[Any] = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        snake_case : Tuple = encoded_sequence['''input_ids'''].shape
        snake_case : List[Any] = ReformerModel(UpperCAmelCase__ )
        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**UpperCAmelCase__ )
            model(**UpperCAmelCase__ )
    @slow
    def lowerCAmelCase( self : Optional[int] ):
        """Integration test against a pinned revision of the pretrained repo."""
        # fmt: off
        snake_case : Tuple = {'''input_ids''': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on
        # This tokenizer does not know some characters like ")".
        # That is the reason why we use very simple texts here.
        # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        snake_case : Tuple = [
            '''This is a very simple sentence.''',
            '''The quick brown fox jumps over the lazy dog.''',
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase__ , model_name='''google/reformer-crime-and-punishment''' , revision='''0e6c3decb8211d49bf881013425dc8b0448b3f5a''' , padding=UpperCAmelCase__ , sequences=UpperCAmelCase__ , )
| 84 | 0 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
# Module-level conversion-script setup.
logging.set_verbosity_info()
# NOTE(review): both constants were mangled to `_a`, so the second assignment
# clobbers the first — originally a `logger` and `EXPECTED_MISSING_KEYS`.
_a : Optional[int] = logging.get_logger(__name__)
# Keys that are legitimately absent after conversion (positions are computed).
_a : Any = ['model.decoder.embed_positions.weights']
def a_ ( __magic_name__ ) -> str:
    """Map a fairseq MusicGen decoder key onto the HF Musicgen naming scheme.

    Substitutions are applied in order, each at most once, matching the
    original if-chain (order matters for overlapping needles such as
    ``norm1`` / ``norm_cross`` / ``norm2`` / ``out_norm``).

    Fix: the original body read and wrote a variable ``name`` that was never
    defined (the parameter had been mangled), so every call raised NameError.

    Args:
        __magic_name__: original checkpoint key.

    Returns:
        The renamed key.
    """
    substitutions = (
        ('''emb''', '''model.decoder.embed_tokens'''),
        ('''transformer''', '''model.decoder'''),
        ('''cross_attention''', '''encoder_attn'''),
        ('''linear1''', '''fc1'''),
        ('''linear2''', '''fc2'''),
        ('''norm1''', '''self_attn_layer_norm'''),
        ('''norm_cross''', '''encoder_attn_layer_norm'''),
        ('''norm2''', '''final_layer_norm'''),
        ('''out_norm''', '''model.decoder.layer_norm'''),
        ('''linears''', '''lm_heads'''),
        ('''condition_provider.conditioners.description.output_proj''', '''enc_to_dec_proj'''),
    )
    name = __magic_name__
    for needle, replacement in substitutions:
        if needle in name:
            name = name.replace(needle , replacement )
    return name
def a_ ( state_dict , hidden_size ) -> Tuple[Dict, Dict]:
    """Rename fairseq MusicGen keys to HF layout, splitting fused qkv weights
    and separating the encoder->decoder projection into its own dict.

    Fix: the original signature repeated the placeholder parameter name (a
    SyntaxError), and the mangling had dropped the destination keys of every
    state-dict write; this restores them (the second parameter is called
    ``hidden_size`` to match the keyword call site below).

    Args:
        state_dict: raw fairseq decoder state dict (mutated in place).
        hidden_size: decoder hidden size, used to slice the fused qkv tensor.

    Returns:
        ``(state_dict, enc_dec_proj_state_dict)``.
    """
    keys = list(state_dict.keys() )
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key )
        key = rename_keys(key )
        if "in_proj_weight" in key:
            # split fused qkv proj into separate q/k/v projections
            state_dict[key.replace('''in_proj_weight''' , '''q_proj.weight''' )] = val[:hidden_size, :]
            state_dict[key.replace('''in_proj_weight''' , '''k_proj.weight''' )] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace('''in_proj_weight''' , '''v_proj.weight''' )] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            # The projection lives on the composite model, not the decoder.
            enc_dec_proj_state_dict[key[len('''enc_to_dec_proj.''' ) :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def a_ ( __magic_name__ ) -> MusicgenDecoderConfig:
"""simple docstring"""
if checkpoint == "small":
# default config values
snake_case : Optional[Any] = 1_024
snake_case : str = 24
snake_case : Any = 16
elif checkpoint == "medium":
snake_case : Optional[Any] = 1_536
snake_case : Optional[int] = 48
snake_case : Union[str, Any] = 24
elif checkpoint == "large":
snake_case : Dict = 2_048
snake_case : Optional[Any] = 48
snake_case : str = 32
else:
raise ValueError(F"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}." )
snake_case : str = MusicgenDecoderConfig(
hidden_size=__magic_name__ , ffn_dim=hidden_size * 4 , num_hidden_layers=__magic_name__ , num_attention_heads=__magic_name__ , )
return config
@torch.no_grad()
def a_ ( __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__="cpu" ) -> Optional[Any]:
    """Convert a fairseq MusicGen checkpoint into HF format and save/push it.

    NOTE(review): machine-mangled. The repeated ``__magic_name__`` parameters
    (a SyntaxError) were presumably ``checkpoint``, ``pytorch_dump_folder``,
    ``repo_id`` and ``device``; the body reads those names directly. Many
    locals below bind the placeholder ``snake_case`` and are then referenced
    under their original names — restore them before running.
    """
    # Load the original fairseq model and derive the HF decoder config.
    snake_case : Optional[int] = MusicGen.get_pretrained(__magic_name__ , device=__magic_name__ )
    snake_case : str = decoder_config_from_checkpoint(__magic_name__ )
    snake_case : int = fairseq_model.lm.state_dict()
    # Remap keys; qkv is split, enc->dec projection is separated out.
    snake_case : Any = rename_state_dict(
        __magic_name__ , hidden_size=decoder_config.hidden_size )
    snake_case : int = TaEncoderModel.from_pretrained('''t5-base''' )
    snake_case : List[str] = EncodecModel.from_pretrained('''facebook/encodec_32khz''' )
    snake_case : Tuple = MusicgenForCausalLM(__magic_name__ ).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    snake_case : Dict = decoder.load_state_dict(__magic_name__ , strict=__magic_name__ )
    for key in missing_keys.copy():
        if key.startswith(('''text_encoder''', '''audio_encoder''') ) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(__magic_name__ )
    if len(__magic_name__ ) > 0:
        raise ValueError(F"Missing key(s) in state_dict: {missing_keys}" )
    if len(__magic_name__ ) > 0:
        raise ValueError(F"Unexpected key(s) in state_dict: {unexpected_keys}" )
    # init the composite model
    snake_case : Any = MusicgenForConditionalGeneration(text_encoder=__magic_name__ , audio_encoder=__magic_name__ , decoder=__magic_name__ )
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(__magic_name__ )
    # check we can do a forward pass
    snake_case : Any = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
    snake_case : Dict = input_ids.reshape(2 * 4 , -1 )
    with torch.no_grad():
        snake_case : Union[str, Any] = model(input_ids=__magic_name__ , decoder_input_ids=__magic_name__ ).logits
    if logits.shape != (8, 1, 2_048):
        raise ValueError('''Incorrect shape for logits''' )
    # now construct the processor
    snake_case : List[Any] = AutoTokenizer.from_pretrained('''t5-base''' )
    snake_case : Optional[Any] = AutoFeatureExtractor.from_pretrained('''facebook/encodec_32khz''' , padding_side='''left''' )
    snake_case : Any = MusicgenProcessor(feature_extractor=__magic_name__ , tokenizer=__magic_name__ )
    # set the appropriate bos/pad token ids
    snake_case : Union[str, Any] = 2_048
    snake_case : str = 2_048
    # set other default generation config params
    snake_case : int = int(30 * audio_encoder.config.frame_rate )
    snake_case : Optional[int] = True
    snake_case : Optional[Any] = 3.0
    if pytorch_dump_folder is not None:
        Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
        logger.info(F"Saving model {checkpoint} to {pytorch_dump_folder}" )
        model.save_pretrained(__magic_name__ )
        processor.save_pretrained(__magic_name__ )
    if repo_id:
        logger.info(F"Pushing model {checkpoint} to {repo_id}" )
        model.push_to_hub(__magic_name__ )
        processor.push_to_hub(__magic_name__ )
if __name__ == "__main__":
    # CLI entry point for the MusicGen conversion.
    _a : str = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--checkpoint',
        default='small',
        type=str,
        help='Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.',
    )
    parser.add_argument(
        '--pytorch_dump_folder',
        required=True,
        default=None,
        type=str,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument(
        '--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
    )
    parser.add_argument(
        '--device', default='cpu', type=str, help='Torch device to run the conversion, either cpu or cuda.'
    )
    _a : List[str] = parser.parse_args()
    # NOTE(review): `parser`/`args` were mangled to `_a`, and the conversion
    # function above was renamed to `a_`, so `parser`, `args` and
    # `convert_musicgen_checkpoint` are undefined names here — restore the
    # original identifiers before running.
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def a_ ( __magic_name__ ) -> Tuple:
    """Convert a PIL image to a [-1, 1] NCHW float tensor for the VQVAE.

    Width and height are snapped down to the nearest multiple of 32 before
    resizing (the UNet/VQVAE require dimensions divisible by their downscale
    factor).

    Fixes: the original body referenced undefined names (``w``, ``h``,
    ``image`` — lost to identifier mangling) and used the non-existent
    ``np.floataa`` instead of ``np.float32``.

    Args:
        __magic_name__: input ``PIL.Image.Image``.

    Returns:
        A ``torch.FloatTensor`` of shape (1, 3, h, w) in [-1, 1].
    """
    w, h = __magic_name__.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = __magic_name__.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] )
    image = np.array(image ).astype(np.float32 ) / 255.0
    image = image[None].transpose(0 , 3 , 1 , 2 )
    image = torch.from_numpy(image )
    return 2.0 * image - 1.0
class a_ ( a ):
    """Latent-diffusion super-resolution pipeline (UNet + VQVAE).

    NOTE(review): identifiers were machine-mangled — `__call__` repeats the
    placeholder parameter name ``UpperCAmelCase__`` (a SyntaxError) and the
    body reads names (``batch_size``, ``image``, ``latents``, ``latents_shape``,
    ``output_type``, ``return_dict``, ...) whose bindings were collapsed to
    ``snake_case``. The comments below describe the evident intent; the
    original names must be restored before this can run.
    """

    def __init__( self : Optional[Any] , UpperCAmelCase__ : VQModel , UpperCAmelCase__ : UNetaDModel , UpperCAmelCase__ : Union[
        DDIMScheduler,
        PNDMScheduler,
        LMSDiscreteScheduler,
        EulerDiscreteScheduler,
        EulerAncestralDiscreteScheduler,
        DPMSolverMultistepScheduler,
    ] , ):
        """Register the VQVAE, UNet and scheduler as pipeline modules."""
        super().__init__()
        self.register_modules(vqvae=UpperCAmelCase__ , unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ )
    @torch.no_grad()
    def __call__( self : Any , UpperCAmelCase__ : Union[torch.Tensor, PIL.Image.Image] = None , UpperCAmelCase__ : Optional[int] = 1 , UpperCAmelCase__ : Optional[int] = 100 , UpperCAmelCase__ : Optional[float] = 0.0 , UpperCAmelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , ):
        """Run the super-resolution denoising loop over a low-res input image."""
        # Batch size comes from the input type: 1 for a PIL image, else dim 0.
        if isinstance(UpperCAmelCase__ , PIL.Image.Image ):
            snake_case : Optional[int] = 1
        elif isinstance(UpperCAmelCase__ , torch.Tensor ):
            snake_case : Any = image.shape[0]
        else:
            raise ValueError(F"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(UpperCAmelCase__ )}" )
        if isinstance(UpperCAmelCase__ , PIL.Image.Image ):
            snake_case : Optional[Any] = preprocess(UpperCAmelCase__ )
        snake_case , snake_case : Union[str, Any] = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        snake_case : List[Any] = (batch_size, self.unet.config.in_channels // 2, height, width)
        snake_case : str = next(self.unet.parameters() ).dtype
        # Initial noise latents, matching the UNet parameter dtype.
        snake_case : Dict = randn_tensor(UpperCAmelCase__ , generator=UpperCAmelCase__ , device=self.device , dtype=UpperCAmelCase__ )
        snake_case : Any = image.to(device=self.device , dtype=UpperCAmelCase__ )
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(UpperCAmelCase__ , device=self.device )
        snake_case : Optional[Any] = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        snake_case : Union[str, Any] = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        snake_case : Any = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        snake_case : Optional[Any] = {}
        if accepts_eta:
            snake_case : Dict = eta
        for t in self.progress_bar(UpperCAmelCase__ ):
            # concat latents and low resolution image in the channel dimension.
            snake_case : Optional[int] = torch.cat([latents, image] , dim=1 )
            snake_case : str = self.scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ )
            # predict the noise residual
            snake_case : int = self.unet(UpperCAmelCase__ , UpperCAmelCase__ ).sample
            # compute the previous noisy sample x_t -> x_t-1
            snake_case : Any = self.scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
        # decode the image latents with the VQVAE
        snake_case : Optional[int] = self.vqvae.decode(UpperCAmelCase__ ).sample
        snake_case : int = torch.clamp(UpperCAmelCase__ , -1.0 , 1.0 )
        # Map [-1, 1] output to [0, 1] and to HWC numpy layout.
        snake_case : Dict = image / 2 + 0.5
        snake_case : int = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            snake_case : Any = self.numpy_to_pil(UpperCAmelCase__ )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=UpperCAmelCase__ )
| 84 | 0 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
_a : Tuple = 'true'
def a_ ( __magic_name__ , __magic_name__=82 , __magic_name__=16 ) -> Optional[int]:
    """Build a seeded regression model/dataset/dataloader prepared by accelerate.

    NOTE(review): machine-mangled — the repeated ``__magic_name__`` parameters
    (a SyntaxError) were presumably ``accelerator``, ``num_samples`` and
    ``batch_size``; the body reads ``accelerator``, and the return names
    (``model``, ``ddp_model``, ``dataloader``) were collapsed to ``snake_case``
    bindings. Restore the names before running.
    """
    set_seed(42 )  # deterministic weights/data across processes
    snake_case : int = RegressionModel()
    snake_case : Union[str, Any] = deepcopy(__magic_name__ )
    snake_case : List[Any] = RegressionDataset(length=__magic_name__ )
    snake_case : Optional[int] = DataLoader(__magic_name__ , batch_size=__magic_name__ )
    model.to(accelerator.device )
    snake_case : List[str] = accelerator.prepare(__magic_name__ , __magic_name__ )
    return model, ddp_model, dataloader
def a_ ( __magic_name__ , __magic_name__=False ) -> List[Any]:
    """Build a tokenized GLUE/MRPC validation dataloader.

    NOTE(review): machine-mangled — the repeated ``__magic_name__`` parameters
    (a SyntaxError) were presumably ``accelerator`` and ``use_longest``; the
    body reads ``accelerator``, ``tokenizer``, ``use_longest`` and
    ``tokenized_datasets`` whose bindings were collapsed to ``snake_case``.
    """
    snake_case : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''' )
    snake_case : int = load_dataset('''glue''' , '''mrpc''' , split='''validation''' )
    def tokenize_function(__magic_name__ ):
        # Tokenize sentence pairs; truncation/max_length args were mangled.
        snake_case : Union[str, Any] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__magic_name__ , max_length=__magic_name__ )
        return outputs
    with accelerator.main_process_first():
        # Map on the main process first so the cache is shared.
        snake_case : str = dataset.map(
            __magic_name__ , batched=__magic_name__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
    snake_case : int = tokenized_datasets.rename_column('''label''' , '''labels''' )
    def collate_fn(__magic_name__ ):
        # Pad dynamically to the longest sample, or statically to 128 tokens.
        if use_longest:
            return tokenizer.pad(__magic_name__ , padding='''longest''' , return_tensors='''pt''' )
        return tokenizer.pad(__magic_name__ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
    return DataLoader(__magic_name__ , shuffle=__magic_name__ , collate_fn=__magic_name__ , batch_size=16 )
def a_ ( __magic_name__ , __magic_name__ ) -> Tuple:
    """Prepare the MRPC model/dataloader pair both with and without Accelerate DDP.

    NOTE(review): identifier obfuscation damage — duplicate parameter names
    (SyntaxError) and collapsed ``snake_case`` locals; ``dispatch_batches``,
    ``ddp_model``, ``model`` etc. are referenced but never bound.
    """
    snake_case : Union[str, Any] = Accelerator(dispatch_batches=__magic_name__ , split_batches=__magic_name__ )
    snake_case : Union[str, Any] = get_dataloader(__magic_name__ , not dispatch_batches )
    snake_case : Any = AutoModelForSequenceClassification.from_pretrained(
        '''hf-internal-testing/mrpc-bert-base-cased''' , return_dict=__magic_name__ )
    snake_case : Optional[Any] = accelerator.prepare(__magic_name__ , __magic_name__ )
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def a_ ( __magic_name__ , __magic_name__ , __magic_name__ ) -> Tuple:
    """Run ``model`` over ``dataloader`` and gather (logits, targets) across processes.

    NOTE(review): identifier obfuscation damage — duplicate parameter names
    (SyntaxError) and collapsed ``snake_case`` locals.
    """
    snake_case : Dict = []
    for batch in dataloader:
        snake_case : Optional[int] = batch.values()
        with torch.no_grad():
            snake_case : Tuple = model(__magic_name__ )
        # gather_for_metrics drops the duplicate samples added for even sharding.
        snake_case : str = accelerator.gather_for_metrics((logit, target) )
        logits_and_targets.append((logit, target) )
    snake_case : Any = [], []
    for logit, targ in logits_and_targets:
        logits.append(__magic_name__ )
        targs.append(__magic_name__ )
    snake_case : List[str] = torch.cat(__magic_name__ ), torch.cat(__magic_name__ )
    return logits, targs
def a_ ( __magic_name__ , __magic_name__=82 , __magic_name__=False , __magic_name__=False , __magic_name__=16 ) -> str:
    """Assert that gather_for_metrics yields exactly ``num_samples`` predictions.

    NOTE(review): all five parameters share the obfuscated name
    ``__magic_name__`` — a SyntaxError until distinct names are restored.
    """
    snake_case : str = get_basic_setup(__magic_name__ , __magic_name__ , __magic_name__ )
    snake_case : Optional[int] = generate_predictions(__magic_name__ , __magic_name__ , __magic_name__ )
    assert (
        len(__magic_name__ ) == num_samples
    ), F"Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(__magic_name__ )}"
def a_ ( __magic_name__ = False , __magic_name__ = False ) -> Any:
    """Check distributed (DDP + gather_for_metrics) MRPC metrics match single-process.

    NOTE(review): identifier obfuscation damage — duplicate parameter names and
    collapsed ``snake_case`` locals; ``setup``, ``metric``, ``baseline``,
    ``distributed`` etc. are referenced but never bound.
    """
    snake_case : Union[str, Any] = evaluate.load('''glue''' , '''mrpc''' )
    snake_case : Optional[Any] = get_mrpc_setup(__magic_name__ , __magic_name__ )
    # First do baseline
    snake_case : str = setup['''no''']
    model.to(__magic_name__ )
    model.eval()
    for batch in dataloader:
        batch.to(__magic_name__ )
        with torch.inference_mode():
            snake_case : List[str] = model(**__magic_name__ )
        snake_case : List[str] = outputs.logits.argmax(dim=-1 )
        metric.add_batch(predictions=__magic_name__ , references=batch['''labels'''] )
    snake_case : List[str] = metric.compute()
    # Then do distributed
    snake_case : Tuple = setup['''ddp''']
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            snake_case : Optional[Any] = model(**__magic_name__ )
        snake_case : List[Any] = outputs.logits.argmax(dim=-1 )
        snake_case : Optional[int] = batch['''labels''']
        snake_case : Dict = accelerator.gather_for_metrics((preds, references) )
        metric.add_batch(predictions=__magic_name__ , references=__magic_name__ )
    snake_case : List[Any] = metric.compute()
    # Both runs must agree (floating-point tolerant comparison).
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key] , distributed[key] ), F"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def a_ ( ) -> List[str]:
    """Exercise gather_for_metrics across all split/dispatch batch configurations.

    NOTE(review): identifier obfuscation damage — ``snake_case`` locals and
    calls that pass the undefined ``__magic_name__``.
    """
    snake_case : str = Accelerator(split_batches=__magic_name__ , dispatch_batches=__magic_name__ )
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print('''**Testing gather_for_metrics**''' )
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(F"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`" )
                test_mrpc(__magic_name__ , __magic_name__ )
                # Reset global Accelerator state between configurations.
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('''**Test torch metrics**''' )
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            snake_case : Dict = Accelerator(split_batches=__magic_name__ , dispatch_batches=__magic_name__ )
            if accelerator.is_local_main_process:
                print(F"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99" )
            test_torch_metrics(__magic_name__ , 99 )
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('''**Test last batch is not dropped when perfectly divisible**''' )
    snake_case : str = Accelerator()
    test_torch_metrics(__magic_name__ , 512 )
    accelerator.state._reset_state()
def a_ ( __magic_name__ ) -> Optional[int]:
    """Multi-process entry point — presumably the TPU spawn hook (index arg ignored); confirm."""
    main()


if __name__ == "__main__":
    # NOTE(review): ``main`` is never defined under that name in this file
    # (the entry point above is obfuscated to ``a_``).
    main()
| 700 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class a_ ( a ):
    """Unit tests for the REALM retriever (tokenizer + block-records round trip).

    NOTE(review): identifier obfuscation damage — every method is named
    ``lowerCAmelCase`` (so earlier definitions are shadowed by later ones),
    locals collapse to ``snake_case``, and the base class ``a`` is undefined
    here (presumably unittest.TestCase; confirm). Restore names before running.
    """

    def lowerCAmelCase( self : List[Any] ):
        """Create a temp dir holding a tiny WordPiece vocab and a block-records dir."""
        snake_case : List[Any] = tempfile.mkdtemp()
        snake_case : Dict = 5
        # Realm tok
        snake_case : str = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''test''',
            '''question''',
            '''this''',
            '''is''',
            '''the''',
            '''first''',
            '''second''',
            '''third''',
            '''fourth''',
            '''fifth''',
            '''record''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        snake_case : Tuple = os.path.join(self.tmpdirname , '''realm_tokenizer''' )
        os.makedirs(UpperCAmelCase__ , exist_ok=UpperCAmelCase__ )
        snake_case : Any = os.path.join(UpperCAmelCase__ , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        snake_case : Tuple = os.path.join(self.tmpdirname , '''realm_block_records''' )
        os.makedirs(UpperCAmelCase__ , exist_ok=UpperCAmelCase__ )

    def lowerCAmelCase( self : List[Any] ):
        """Load a RealmTokenizer from the temp vocab written in setUp."""
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer''' ) )

    def lowerCAmelCase( self : Any ):
        """Remove the temporary directory."""
        shutil.rmtree(self.tmpdirname )

    def lowerCAmelCase( self : Optional[int] ):
        """Return a RealmConfig sized to the dummy block records."""
        snake_case : Any = RealmConfig(num_block_records=self.num_block_records )
        return config

    def lowerCAmelCase( self : int ):
        """Return a two-row dummy QA dataset."""
        snake_case : Optional[int] = Dataset.from_dict(
            {
                '''id''': ['''0''', '''1'''],
                '''question''': ['''foo''', '''bar'''],
                '''answers''': [['''Foo''', '''Bar'''], ['''Bar''']],
            } )
        return dataset

    def lowerCAmelCase( self : str ):
        """Return the dummy evidence blocks as a numpy array of byte strings."""
        snake_case : Dict = np.array(
            [
                b'''This is the first record''',
                b'''This is the second record''',
                b'''This is the third record''',
                b'''This is the fourth record''',
                b'''This is the fifth record''',
                b'''This is a longer longer longer record''',
            ] , dtype=UpperCAmelCase__ , )
        return block_records

    def lowerCAmelCase( self : Optional[Any] ):
        """Build a RealmRetriever over the dummy blocks and tokenizer."""
        snake_case : Tuple = RealmRetriever(
            block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
        return retriever

    def lowerCAmelCase( self : Optional[Any] ):
        """Retrieval concatenates question + evidence into tensors of shape (2, 10)."""
        snake_case : List[str] = self.get_config()
        snake_case : Optional[Any] = self.get_dummy_retriever()
        snake_case : Optional[int] = retriever.tokenizer
        snake_case : Dict = np.array([0, 3] , dtype='''long''' )
        snake_case : Optional[int] = tokenizer(['''Test question'''] ).input_ids
        snake_case : Union[str, Any] = tokenizer(
            ['''the fourth'''] , add_special_tokens=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , ).input_ids
        snake_case : Optional[Any] = config.reader_seq_len
        snake_case , snake_case , snake_case , snake_case : List[str] = retriever(
            UpperCAmelCase__ , UpperCAmelCase__ , answer_ids=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors='''np''' )
        self.assertEqual(len(UpperCAmelCase__ ) , 2 )
        self.assertEqual(len(UpperCAmelCase__ ) , 2 )
        self.assertEqual(len(UpperCAmelCase__ ) , 2 )
        self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
        self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
        self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
        self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , )

    def lowerCAmelCase( self : Optional[Any] ):
        """Answer spans are located inside the retrieved blocks (start/end positions)."""
        snake_case : List[Any] = self.get_config()
        snake_case : Optional[int] = self.get_dummy_retriever()
        snake_case : List[str] = retriever.tokenizer
        snake_case : Optional[Any] = np.array([0, 3, 5] , dtype='''long''' )
        snake_case : Optional[int] = tokenizer(['''Test question'''] ).input_ids
        snake_case : Any = tokenizer(
            ['''the fourth''', '''longer longer'''] , add_special_tokens=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , ).input_ids
        snake_case : List[Any] = config.reader_seq_len
        snake_case , snake_case , snake_case , snake_case : Union[str, Any] = retriever(
            UpperCAmelCase__ , UpperCAmelCase__ , answer_ids=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors='''np''' )
        self.assertEqual([False, True, True] , UpperCAmelCase__ )
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , UpperCAmelCase__ )
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , UpperCAmelCase__ )

    def lowerCAmelCase( self : Optional[int] ):
        """save_pretrained / from_pretrained round-trips the block records (local + mocked hub)."""
        snake_case : int = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
        # Test local path
        snake_case : Optional[Any] = retriever.from_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
        self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
        # Test mocked remote path
        with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''' ) as mock_hf_hub_download:
            snake_case : Any = os.path.join(
                os.path.join(self.tmpdirname , '''realm_block_records''' ) , _REALM_BLOCK_RECORDS_FILENAME )
            snake_case : Any = RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''' )
            self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
| 84 | 0 |
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# Batch-size constants for the MRPC example.
# NOTE(review): both names were obfuscated to ``_a`` (the second shadows the
# first); originally two distinct constants — confirm against the upstream script.
_a : Optional[Any] = 16
_a : Union[str, Any] = 32
def a_ ( __magic_name__ , __magic_name__ = 16 ) -> Dict:
    """Build train/eval DataLoaders for GLUE MRPC with bert-base-cased tokenization.

    NOTE(review): identifier obfuscation damage — duplicate parameter names
    (SyntaxError), collapsed ``snake_case`` locals; ``tokenizer``, ``datasets``,
    ``accelerator`` etc. are referenced but never bound in this scope.
    """
    snake_case : Tuple = AutoTokenizer.from_pretrained('''bert-base-cased''' )
    snake_case : Any = load_dataset('''glue''' , '''mrpc''' )

    def tokenize_function(__magic_name__ ):
        # max_length=None => use the model max length (it's actually the default)
        snake_case : Union[str, Any] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__magic_name__ , max_length=__magic_name__ )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        snake_case : Union[str, Any] = datasets.map(
            __magic_name__ , batched=__magic_name__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    snake_case : Optional[Any] = tokenized_datasets.rename_column('''label''' , '''labels''' )

    def collate_fn(__magic_name__ ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        snake_case : str = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            snake_case : Tuple = 16
        elif accelerator.mixed_precision != "no":
            snake_case : Dict = 8
        else:
            snake_case : Union[str, Any] = None
        return tokenizer.pad(
            __magic_name__ , padding='''longest''' , max_length=__magic_name__ , pad_to_multiple_of=__magic_name__ , return_tensors='''pt''' , )

    # Instantiate dataloaders.
    snake_case : str = DataLoader(
        tokenized_datasets['''train'''] , shuffle=__magic_name__ , collate_fn=__magic_name__ , batch_size=__magic_name__ )
    snake_case : List[str] = DataLoader(
        tokenized_datasets['''validation'''] , shuffle=__magic_name__ , collate_fn=__magic_name__ , batch_size=__magic_name__ )
    return train_dataloader, eval_dataloader
# For testing only: swap in lightweight mocked dataloaders when the CI env flag is set.
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    # NOTE(review): obfuscated — originally this rebinds ``get_dataloaders`` (hence noqa: F811).
    _a : Optional[int] = mocked_dataloaders  # noqa: F811
def a_ ( __magic_name__ , __magic_name__ ) -> Optional[Any]:
    """Train bert-base-cased on MRPC, shrinking the batch size automatically on CUDA OOM.

    NOTE(review): identifier obfuscation damage as elsewhere in this file —
    duplicate parameter names (SyntaxError), ``snake_case`` locals, and
    references to unbound names (``args``, ``config``, ``model``,
    ``optimizer``, ``metric`` ...).
    """
    if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __magic_name__ ) == "1":
        snake_case : Optional[int] = 2
    # Initialize accelerator
    snake_case : int = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    snake_case : Dict = config['''lr''']
    snake_case : Any = int(config['''num_epochs'''] )
    snake_case : List[str] = int(config['''seed'''] )
    snake_case : List[Any] = int(config['''batch_size'''] )
    snake_case : Tuple = evaluate.load('''glue''' , '''mrpc''' )

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=__magic_name__ )
    def inner_training_loop(__magic_name__ ):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()
        # Then we can declare the model, optimizer, and everything else:
        set_seed(__magic_name__ )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        snake_case : str = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__magic_name__ )
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        snake_case : Optional[int] = model.to(accelerator.device )
        # Instantiate optimizer
        snake_case : Optional[int] = AdamW(params=model.parameters() , lr=__magic_name__ )
        snake_case : List[Any] = get_dataloaders(__magic_name__ , __magic_name__ )
        # Instantiate scheduler
        snake_case : int = get_linear_schedule_with_warmup(
            optimizer=__magic_name__ , num_warmup_steps=100 , num_training_steps=(len(__magic_name__ ) * num_epochs) , )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        snake_case : Tuple = accelerator.prepare(
            __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
        # Now we train the model
        for epoch in range(__magic_name__ ):
            model.train()
            for step, batch in enumerate(__magic_name__ ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                snake_case : int = model(**__magic_name__ )
                snake_case : Optional[int] = outputs.loss
                accelerator.backward(__magic_name__ )
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(__magic_name__ ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                with torch.no_grad():
                    snake_case : List[str] = model(**__magic_name__ )
                snake_case : List[Any] = outputs.logits.argmax(dim=-1 )
                snake_case : Dict = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
                metric.add_batch(
                    predictions=__magic_name__ , references=__magic_name__ , )
            snake_case : Tuple = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(F"epoch {epoch}:" , __magic_name__ )

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def a_ ( ) -> Union[str, Any]:
    """Parse CLI args and launch training with default MRPC hyper-parameters.

    NOTE(review): obfuscated — ``parser``/``args``/``config`` and the call to
    ``training_function`` reference names this file never binds.
    """
    snake_case : int = argparse.ArgumentParser(description='''Simple example of training script.''' )
    parser.add_argument(
        '''--mixed_precision''' , type=__magic_name__ , default=__magic_name__ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''' , )
    parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
    snake_case : Optional[Any] = parser.parse_args()
    snake_case : Optional[int] = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    training_function(__magic_name__ , __magic_name__ )


if __name__ == "__main__":
    main()
| 701 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy-import structure for the encoder-decoder model family: only the
# configuration is listed eagerly; framework-specific models are added when the
# corresponding backend (torch / tf / flax) is available.
# NOTE(review): obfuscation damage — each branch rebinds the single name ``_a``
# (originally distinct entries appended to ``_import_structure``), and the final
# _LazyModule call references ``_import_structure``, which is never defined here.
_a : str = {'configuration_encoder_decoder': ['EncoderDecoderConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _a : int = ['EncoderDecoderModel']

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _a : Dict = ['TFEncoderDecoderModel']

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _a : int = ['FlaxEncoderDecoderModel']

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_encoder_decoder import EncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encoder_decoder import EncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_encoder_decoder import TFEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
    # At runtime the module is replaced by a lazy proxy.
    import sys

    _a : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 84 | 0 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def a_ ( __magic_name__ , __magic_name__ ) -> Dict:
    """Compute WER/CER for an ASR result dataset and write them (and optionally
    all predictions/targets) to text files.

    NOTE(review): identifier obfuscation damage — duplicate parameter names and
    ``snake_case`` locals; ``args``, ``result``, ``wer``, ``cer`` etc. are
    referenced but never bound.
    """
    snake_case : Dict = args.log_outputs
    snake_case : Optional[int] = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] )
    # load metric
    snake_case : Optional[Any] = load_metric('''wer''' )
    snake_case : Union[str, Any] = load_metric('''cer''' )
    # compute metrics
    snake_case : Union[str, Any] = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
    snake_case : List[Any] = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
    # print & log results
    snake_case : Optional[Any] = F"WER: {wer_result}\nCER: {cer_result}"
    print(__magic_name__ )
    with open(F"{dataset_id}_eval_results.txt" , '''w''' ) as f:
        f.write(__magic_name__ )
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        snake_case : Optional[int] = F"log_{dataset_id}_predictions.txt"
        snake_case : Optional[int] = F"log_{dataset_id}_targets.txt"
        with open(__magic_name__ , '''w''' ) as p, open(__magic_name__ , '''w''' ) as t:
            # mapping function to write output
            def write_to_file(__magic_name__ , __magic_name__ ):
                p.write(F"{i}" + '''\n''' )
                p.write(batch['''prediction'''] + '''\n''' )
                t.write(F"{i}" + '''\n''' )
                t.write(batch['''target'''] + '''\n''' )

            result.map(__magic_name__ , with_indices=__magic_name__ )
def a_(text: str) -> str:
    """Lower-case ``text`` and strip the characters/whitespace ignored during training.

    Punctuation listed in the character class below is removed entirely; newline
    and multi-space sequences are then collapsed to single spaces (order of the
    replacement sequences matters).

    Fixes obfuscation defects in the original: the body referenced ``text`` and
    ``t`` while the parameter was named ``__magic_name__``, and every local was
    bound to the placeholder ``snake_case``.
    """
    # IMPORTANT: this should correspond to the chars that were ignored during training
    chars_to_ignore_regex = '''[,?.!\-\;\:"“%‘”�—’…–]'''  # noqa: W605
    text = re.sub(chars_to_ignore_regex, '''''', text.lower())
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ['''\n\n''', '''\n''', '''   ''', '''  ''']
    for t in token_sequences_to_ignore:
        text = ''' '''.join(text.split(t))
    return text
def a_ ( __magic_name__ ) -> List[str]:
    """Run the ASR pipeline over the requested dataset split and log WER/CER.

    NOTE(review): identifier obfuscation damage — ``snake_case`` locals;
    ``dataset``, ``feature_extractor``, ``asr``, ``batch``, ``prediction``
    are referenced but never bound.
    """
    snake_case : Dict = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=__magic_name__ )
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    snake_case : int = AutoFeatureExtractor.from_pretrained(args.model_id )
    snake_case : Optional[Any] = feature_extractor.sampling_rate
    # resample audio
    snake_case : Tuple = dataset.cast_column('''audio''' , Audio(sampling_rate=__magic_name__ ) )
    # load eval pipeline
    if args.device is None:
        snake_case : Union[str, Any] = 0 if torch.cuda.is_available() else -1
    snake_case : Optional[int] = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device )

    # map function to decode audio
    def map_to_pred(__magic_name__ ):
        snake_case : List[str] = asr(
            batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
        snake_case : List[Any] = prediction['''text''']
        snake_case : List[Any] = normalize_text(batch['''sentence'''] )
        return batch

    # run inference on all examples
    snake_case : str = dataset.map(__magic_name__ , remove_columns=dataset.column_names )
    # compute and log_results
    # do not change function below
    log_results(__magic_name__ , __magic_name__ )
if __name__ == "__main__":
    # CLI for the ASR evaluation script.
    # NOTE(review): obfuscated — the parser is bound to ``_a`` but referenced as
    # ``parser``, and ``main(args)`` calls a name this file never defines
    # (the entry point above is obfuscated to ``a_``).
    _a : Any = argparse.ArgumentParser()
    parser.add_argument(
        '--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
    )
    parser.add_argument(
        '--dataset',
        type=str,
        required=True,
        help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
    )
    parser.add_argument(
        '--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
    )
    parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
    parser.add_argument(
        '--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.'
    )
    parser.add_argument(
        '--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.'
    )
    parser.add_argument(
        '--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
    )
    parser.add_argument(
        '--device',
        type=int,
        default=None,
        help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.',
    )
    _a : Any = parser.parse_args()
    main(args)
| 702 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
_a : str = logging.get_logger(__name__)
# Tokenizer resource maps for RetriBert (vocab/tokenizer file names, pretrained
# URLs, positional-embedding sizes, init kwargs).
# NOTE(review): obfuscation damage — every constant is bound to ``_a`` (each
# assignment shadows the previous one); the class below references the original
# names (VOCAB_FILES_NAMES etc.), which are never bound here.
_a : Optional[int] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_a : Optional[Any] = {
    'vocab_file': {
        'yjernite/retribert-base-uncased': (
            'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
        ),
    },
    'tokenizer_file': {
        'yjernite/retribert-base-uncased': (
            'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
        ),
    },
}
_a : Union[str, Any] = {
    'yjernite/retribert-base-uncased': 512,
}
_a : Tuple = {
    'yjernite/retribert-base-uncased': {'do_lower_case': True},
}
class a_ ( a ):
    """Fast (tokenizers-backed) tokenizer for RetriBert, BERT-style WordPiece.

    NOTE(review): obfuscation damage — the base class ``a`` (presumably
    PreTrainedTokenizerFast; confirm) and the ``VOCAB_FILES_NAMES``-family
    constants are never bound under those names in this file, and locals
    collapse to ``snake_case``.
    """

    A__ : List[str] = VOCAB_FILES_NAMES
    A__ : Any = PRETRAINED_VOCAB_FILES_MAP
    A__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    A__ : Any = PRETRAINED_INIT_CONFIGURATION
    A__ : Optional[Any] = RetriBertTokenizer
    A__ : Any = ['input_ids', 'attention_mask']

    def __init__( self : Optional[int] , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : Dict="[UNK]" , UpperCAmelCase__ : str="[SEP]" , UpperCAmelCase__ : Union[str, Any]="[PAD]" , UpperCAmelCase__ : Dict="[CLS]" , UpperCAmelCase__ : Optional[Any]="[MASK]" , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Optional[int]=None , **UpperCAmelCase__ : Dict , ):
        """Initialise the fast tokenizer, then re-sync the backend normalizer
        options (lowercase / strip_accents / handle_chinese_chars) if the
        serialized state differs from the requested ones."""
        super().__init__(
            UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , do_lower_case=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , tokenize_chinese_chars=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ , **UpperCAmelCase__ , )
        snake_case : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , UpperCAmelCase__ ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , UpperCAmelCase__ ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , UpperCAmelCase__ ) != tokenize_chinese_chars
        ):
            # Rebuild the backend normalizer with the requested options.
            snake_case : int = getattr(UpperCAmelCase__ , normalizer_state.pop('''type''' ) )
            snake_case : List[Any] = do_lower_case
            snake_case : Union[str, Any] = strip_accents
            snake_case : int = tokenize_chinese_chars
            snake_case : int = normalizer_class(**UpperCAmelCase__ )
        snake_case : Union[str, Any] = do_lower_case

    def lowerCAmelCase( self : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any]=None ):
        """Build model inputs with special tokens: [CLS] A [SEP] (+ B [SEP])."""
        snake_case : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output

    def lowerCAmelCase( self : Optional[Any] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ):
        """Create BERT-style token-type ids: 0 for sequence A, 1 for sequence B."""
        snake_case : List[Any] = [self.sep_token_id]
        snake_case : Tuple = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def lowerCAmelCase( self : str , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ):
        """Save the backend vocabulary files into the given directory."""
        snake_case : Tuple = self._tokenizer.model.save(UpperCAmelCase__ , name=UpperCAmelCase__ )
        return tuple(UpperCAmelCase__ )
| 84 | 0 |
from __future__ import annotations
# Coulomb's constant, units = N * m^2 * C^-2
# (original annotation said ``str`` for a float literal, and its comment had
# the typo "m^s"; both corrected)
_a : float = 8.988e9
COULOMBS_CONSTANT = _a  # the name the solver below actually references


def a_(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    """Solve Coulomb's law for the single unknown quantity given as 0.

    Exactly one of ``force``, ``charge1``, ``charge2``, ``distance`` must be 0;
    that quantity is computed from the other three and returned in a dict keyed
    by its name.

    Fixes obfuscation defects in the original: all four parameters were named
    ``__magic_name__`` (a SyntaxError), both charges had collapsed into the
    single name ``chargea``, and ``COULOMBS_CONSTANT`` was never bound.

    Raises:
        ValueError: if not exactly one argument is 0, or if distance < 0.
    """
    charge_product = abs(charge1 * charge2)
    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError('''One and only one argument must be 0''')
    if distance < 0:
        raise ValueError('''Distance cannot be negative''')
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError('''Exactly one argument must be 0''')
if __name__ == "__main__":
    # Run the module doctests when executed directly.
    import doctest

    doctest.testmod()
| 703 |
import string
import numpy
def a_(a: int, b: int) -> int:
    """Return the greatest common divisor of ``a`` and ``b`` via Euclid's algorithm.

    Fixes two obfuscation defects in the original: both parameters were named
    ``__magic_name__`` (a SyntaxError), and the recursive call targeted the
    undefined name ``greatest_common_divisor``.
    """
    return b if a == 0 else a_(b % a, a)


# Alias under the name the Hill cipher class below calls.
greatest_common_divisor = a_
class a_ :
    """Hill cipher over the 36-character alphabet A-Z0-9, using numpy matrix arithmetic.

    NOTE(review): heavy identifier obfuscation damage — every method is named
    ``lowerCAmelCase`` (later definitions shadow earlier ones), locals collapse
    to ``snake_case``, the ``modulus`` vectorized lambda takes ``a`` but uses
    the undefined ``x``, and method bodies reference unbound names (``det``,
    ``chars``, ``text``, ``encrypted`` ...). Distinct names must be restored
    before this class can work.
    """

    # Alphabet: 26 uppercase letters plus 10 digits.
    A__ : List[Any] = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters
    # take x and return x % len(key_string)
    A__ : List[str] = numpy.vectorize(lambda a : x % 36 )
    A__ : Dict = numpy.vectorize(a )

    def __init__( self : List[str] , UpperCAmelCase__ : numpy.ndarray ):
        """Store the (mod-36) encryption key, validate it, and record the block size."""
        snake_case : int = self.modulus(UpperCAmelCase__ )  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        snake_case : List[str] = encrypt_key.shape[0]

    def lowerCAmelCase( self : Union[str, Any] , UpperCAmelCase__ : str ):
        """Map an alphabet character to its numeric index."""
        return self.key_string.index(UpperCAmelCase__ )

    def lowerCAmelCase( self : List[str] , UpperCAmelCase__ : int ):
        """Map a numeric value back to its alphabet character."""
        return self.key_string[round(UpperCAmelCase__ )]

    def lowerCAmelCase( self : Optional[Any] ):
        """Raise ValueError unless det(key) is coprime with 36 (i.e. invertible mod 36)."""
        snake_case : List[Any] = round(numpy.linalg.det(self.encrypt_key ) )
        if det < 0:
            snake_case : Tuple = det % len(self.key_string )
        snake_case : Tuple = len(self.key_string )
        if greatest_common_divisor(UpperCAmelCase__ , len(self.key_string ) ) != 1:
            snake_case : List[Any] = (
                F"determinant modular {req_l} of encryption key({det}) "
                F"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(UpperCAmelCase__ )

    def lowerCAmelCase( self : Optional[int] , UpperCAmelCase__ : str ):
        """Upper-case, drop out-of-alphabet chars, and pad to a multiple of break_key."""
        snake_case : Optional[int] = [char for char in text.upper() if char in self.key_string]
        snake_case : Optional[int] = chars[-1]
        while len(UpperCAmelCase__ ) % self.break_key != 0:
            chars.append(UpperCAmelCase__ )
        return "".join(UpperCAmelCase__ )

    def lowerCAmelCase( self : Optional[int] , UpperCAmelCase__ : str ):
        """Encrypt the text block-by-block: c = key @ v (mod 36)."""
        snake_case : Optional[int] = self.process_text(text.upper() )
        snake_case : Optional[int] = ''''''
        for i in range(0 , len(UpperCAmelCase__ ) - self.break_key + 1 , self.break_key ):
            snake_case : int = text[i : i + self.break_key]
            snake_case : int = [self.replace_letters(UpperCAmelCase__ ) for char in batch]
            snake_case : Tuple = numpy.array([vec] ).T
            snake_case : Optional[Any] = self.modulus(self.encrypt_key.dot(UpperCAmelCase__ ) ).T.tolist()[
                0
            ]
            snake_case : Dict = ''''''.join(
                self.replace_digits(UpperCAmelCase__ ) for num in batch_encrypted )
            encrypted += encrypted_batch
        return encrypted

    def lowerCAmelCase( self : str ):
        """Build the decryption key: det_inv * det * inv(key), reduced mod 36."""
        snake_case : Optional[int] = round(numpy.linalg.det(self.encrypt_key ) )
        if det < 0:
            snake_case : int = det % len(self.key_string )
        snake_case : Dict = None
        # Brute-force the modular inverse of the determinant.
        for i in range(len(self.key_string ) ):
            if (det * i) % len(self.key_string ) == 1:
                snake_case : Any = i
                break
        snake_case : Any = (
            det_inv
            * numpy.linalg.det(self.encrypt_key )
            * numpy.linalg.inv(self.encrypt_key )
        )
        return self.to_int(self.modulus(UpperCAmelCase__ ) )

    def lowerCAmelCase( self : Dict , UpperCAmelCase__ : str ):
        """Decrypt the text block-by-block with the decryption key."""
        snake_case : Any = self.make_decrypt_key()
        snake_case : Optional[Any] = self.process_text(text.upper() )
        snake_case : int = ''''''
        for i in range(0 , len(UpperCAmelCase__ ) - self.break_key + 1 , self.break_key ):
            snake_case : Any = text[i : i + self.break_key]
            snake_case : int = [self.replace_letters(UpperCAmelCase__ ) for char in batch]
            snake_case : List[str] = numpy.array([vec] ).T
            snake_case : Optional[Any] = self.modulus(decrypt_key.dot(UpperCAmelCase__ ) ).T.tolist()[0]
            snake_case : int = ''''''.join(
                self.replace_digits(UpperCAmelCase__ ) for num in batch_decrypted )
            decrypted += decrypted_batch
        return decrypted
def a_() -> None:
    """Interactive driver for the Hill cipher.

    Reads the key order and the key matrix rows from stdin, builds a
    ``HillCipher``, then encrypts or decrypts user-supplied text.

    The original body referenced the undefined names ``__magic_name__``,
    ``hill_matrix``, ``hc``, and ``option`` (obfuscation damage: each value
    had been bound to a throw-away ``snake_case`` local); this version binds
    every value to the name it is later read under.  All prompts are kept
    byte-for-byte.
    """
    n = int(input('''Enter the order of the encryption key: ''' ) )
    hill_matrix = []

    print('''Enter each row of the encryption key with space separated integers''' )
    for _ in range(n ):
        row = [int(x ) for x in input().split()]
        hill_matrix.append(row )

    hc = HillCipher(numpy.array(hill_matrix ) )

    print('''Would you like to encrypt or decrypt some text? (1 or 2)''' )
    option = input('''\n1. Encrypt\n2. Decrypt\n''' )
    if option == "1":
        text_to_encrypt = input('''What text would you like to encrypt?: ''' )
        print('''Your encrypted text is:''' )
        print(hc.encrypt(text_to_encrypt ) )
    elif option == "2":
        text_to_decrypt = input('''What text would you like to decrypt?: ''' )
        print('''Your decrypted text is:''' )
        print(hc.decrypt(text_to_decrypt ) )
if __name__ == "__main__":
    # Run doctests, then start the interactive Hill-cipher driver.
    import doctest

    doctest.testmod()
    # NOTE(review): ``main`` is not defined in this module -- the driver
    # function above is named ``a_`` -- so this call raises NameError as
    # written.
    main()
| 84 | 0 |
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
_a : int = logging.getLogger(__name__)
class a_ :
    """Ray-actor wrapper that lazily builds and queries a ``RagRetriever``.

    NOTE(review): obfuscation damage -- the three methods below all share the
    name ``lowerCAmelCase`` (only the last binding survives on the class), the
    second ``def`` declares four parameters that are all named
    ``UpperCAmelCase__`` (a SyntaxError in CPython), and the ``snake_case``
    locals were presumably ``self.retriever`` / ``self.initialized``
    assignments before obfuscation.  Confirm against the upstream
    implementation before relying on this code.
    """

    def __init__( self : List[str] ):
        """Mark the actor as not yet holding a retriever."""
        snake_case : Union[str, Any] = False

    def lowerCAmelCase( self : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any ):
        """Build the wrapped ``RagRetriever`` exactly once (idempotent)."""
        if not self.initialized:
            snake_case : Tuple = RagRetriever(
                UpperCAmelCase__ , question_encoder_tokenizer=UpperCAmelCase__ , generator_tokenizer=UpperCAmelCase__ , index=UpperCAmelCase__ , init_retrieval=UpperCAmelCase__ , )

            snake_case : str = True

    def lowerCAmelCase( self : Dict ):
        """Initialize the retriever's underlying search index."""
        self.retriever.index.init_index()

    def lowerCAmelCase( self : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int ):
        """Run retrieval and return ``(doc_ids, retrieved_doc_embeds)``."""
        snake_case : str = self.retriever._main_retrieve(UpperCAmelCase__ , UpperCAmelCase__ )
        return doc_ids, retrieved_doc_embeds
class a_ ( a ):
    """RAG retriever that fans retrieval out to a pool of Ray actor workers.

    NOTE(review): obfuscation damage throughout -- every method except
    ``__init__`` is named ``lowerCAmelCase`` (later bindings shadow earlier
    ones), several ``def`` lines declare multiple parameters all named
    ``UpperCAmelCase__`` (a SyntaxError in CPython), and the ``snake_case``
    locals were presumably attribute assignments
    (``self.retrieval_workers``, ``config.index_name``, ...).  Confirm
    against the upstream ``RagRayDistributedRetriever`` before relying on
    any behavior documented here.
    """

    def __init__( self : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[Any]=None ):
        """Validate the index/worker combination and register the Ray workers."""
        # An already-initialized index cannot be shipped to remote workers.
        if index is not None and index.is_initialized() and len(UpperCAmelCase__ ) > 0:
            raise ValueError(
                '''When using Ray for distributed fine-tuning, '''
                '''you\'ll need to provide the paths instead, '''
                '''as the dataset and the index are loaded '''
                '''separately. More info in examples/rag/use_own_knowledge_dataset.py ''' )

        super().__init__(
            UpperCAmelCase__ , question_encoder_tokenizer=UpperCAmelCase__ , generator_tokenizer=UpperCAmelCase__ , index=UpperCAmelCase__ , init_retrieval=UpperCAmelCase__ , )

        snake_case : Union[str, Any] = retrieval_workers
        if len(self.retrieval_workers ) > 0:
            # Eagerly construct one RagRetriever on every worker actor.
            ray.get(
                [
                    worker.create_rag_retriever.remote(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
                    for worker in self.retrieval_workers
                ] )

    def lowerCAmelCase( self : List[Any] ):
        """Initialize retrieval on the workers, or locally when none exist."""
        logger.info('''initializing retrieval''' )

        if len(self.retrieval_workers ) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def lowerCAmelCase( self : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] ):
        """Retrieve via a randomly chosen worker (or locally) and return
        ``(retrieved_doc_embeds, doc_ids, doc_dicts)``."""
        if len(self.retrieval_workers ) > 0:
            # Select a random retrieval actor.
            snake_case : Tuple = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
            snake_case : Dict = ray.get(random_worker.retrieve.remote(UpperCAmelCase__ , UpperCAmelCase__ ) )
        else:
            snake_case : List[Any] = self._main_retrieve(UpperCAmelCase__ , UpperCAmelCase__ )
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(UpperCAmelCase__ )

    @classmethod
    def lowerCAmelCase( cls : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any]=None , **UpperCAmelCase__ : str ):
        """Delegate tokenizer loading to the parent class."""
        return super(UpperCAmelCase__ , cls ).get_tokenizers(UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ )

    @classmethod
    def lowerCAmelCase( cls : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Any=None , **UpperCAmelCase__ : Optional[Any] ):
        """Build config, tokenizers and index, then construct the retriever.

        NOTE(review): ``kwargs``, ``rag_tokenizer``, ``config`` and
        ``indexed_dataset`` are read below but never bound under those names
        in this obfuscated body -- NameError as written.
        """
        snake_case : Union[str, Any] = kwargs.pop('''config''' , UpperCAmelCase__ ) or RagConfig.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
        snake_case : Any = RagTokenizer.from_pretrained(UpperCAmelCase__ , config=UpperCAmelCase__ )
        snake_case : Tuple = rag_tokenizer.question_encoder
        snake_case : int = rag_tokenizer.generator

        if indexed_dataset is not None:
            snake_case : Tuple = '''custom'''
            snake_case : Union[str, Any] = CustomHFIndex(config.retrieval_vector_size , UpperCAmelCase__ )
        else:
            snake_case : List[Any] = cls._build_index(UpperCAmelCase__ )
        return cls(
            UpperCAmelCase__ , question_encoder_tokenizer=UpperCAmelCase__ , generator_tokenizer=UpperCAmelCase__ , retrieval_workers=UpperCAmelCase__ , index=UpperCAmelCase__ , )
| 704 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class a_ ( a ):
    """Tool that captions an image with the BLIP image-captioning model.

    Pipeline: ``encode`` preprocesses the PIL image into tensors,
    ``forward`` runs generation, and ``decode`` turns the generated ids back
    into a caption string.

    Fixes versus the original block:
    - ``__init__`` declared ``*UpperCAmelCase__`` and ``**UpperCAmelCase__``
      -- two parameters with the same name, a SyntaxError in CPython -- now
      the conventional ``*args, **kwargs``.
    - ``decode`` passed the output tensor itself as ``skip_special_tokens``;
      it now passes ``True`` so special tokens are stripped from the caption.
    """

    A__ : List[Any] = 'Salesforce/blip-image-captioning-base'
    A__ : Dict = (
        'This is a tool that generates a description of an image. It takes an input named `image` which should be the '
        'image to caption, and returns a text that contains the description in English.'
    )
    A__ : str = 'image_captioner'
    A__ : Dict = AutoModelForVisionaSeq

    A__ : Optional[Any] = ['image']
    A__ : List[str] = ['text']

    def __init__( self : List[str] , *args : Union[str, Any] , **kwargs : Union[str, Any] ):
        """Require the vision extra, then defer to the PipelineTool base."""
        requires_backends(self , ['''vision'''] )
        super().__init__(*args , **kwargs )

    def lowerCAmelCase( self : Optional[int] , UpperCAmelCase__ : "Image" ):
        """Preprocess a PIL image into model-ready PyTorch tensors."""
        return self.pre_processor(images=UpperCAmelCase__ , return_tensors='''pt''' )

    def lowerCAmelCase( self : Any , UpperCAmelCase__ : Union[str, Any] ):
        """Generate caption token ids from the preprocessed inputs."""
        return self.model.generate(**UpperCAmelCase__ )

    def lowerCAmelCase( self : Optional[Any] , UpperCAmelCase__ : List[Any] ):
        """Decode generated ids to a clean, stripped caption string."""
        return self.pre_processor.batch_decode(UpperCAmelCase__ , skip_special_tokens=True )[0].strip()
| 84 | 0 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class a_ :
    """Helper that builds small Llama configs/inputs and checks model outputs.

    NOTE(review): obfuscation damage -- every ``def`` below other than
    ``__init__`` shares the name ``lowerCAmelCase`` (later bindings shadow
    earlier ones on the class), most signatures declare many parameters that
    are all named ``UpperCAmelCase__`` (a SyntaxError in CPython), and the
    ``snake_case`` locals in ``__init__`` were presumably ``self.<attr>``
    assignments.  Documented as-is; confirm against the upstream
    ``transformers`` Llama test suite.
    """

    def __init__( self : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any]=13 , UpperCAmelCase__ : Optional[int]=7 , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : List[Any]=False , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : Optional[int]=99 , UpperCAmelCase__ : List[Any]=32 , UpperCAmelCase__ : Tuple=5 , UpperCAmelCase__ : Optional[int]=4 , UpperCAmelCase__ : Dict=37 , UpperCAmelCase__ : Any="gelu" , UpperCAmelCase__ : Tuple=0.1 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Union[str, Any]=512 , UpperCAmelCase__ : Tuple=16 , UpperCAmelCase__ : Tuple=2 , UpperCAmelCase__ : Dict=0.02 , UpperCAmelCase__ : Optional[int]=3 , UpperCAmelCase__ : Tuple=4 , UpperCAmelCase__ : Tuple=None , ):
        """Record the test hyper-parameters (batch size, model dims, etc.)."""
        snake_case : Tuple = parent
        snake_case : Tuple = batch_size
        snake_case : int = seq_length
        snake_case : str = is_training
        snake_case : Any = use_input_mask
        snake_case : Any = use_token_type_ids
        snake_case : str = use_labels
        snake_case : str = vocab_size
        snake_case : Dict = hidden_size
        snake_case : Any = num_hidden_layers
        snake_case : Optional[int] = num_attention_heads
        snake_case : Optional[int] = intermediate_size
        snake_case : Dict = hidden_act
        snake_case : Optional[Any] = hidden_dropout_prob
        snake_case : Tuple = attention_probs_dropout_prob
        snake_case : str = max_position_embeddings
        snake_case : List[str] = type_vocab_size
        snake_case : List[str] = type_sequence_label_size
        snake_case : Any = initializer_range
        snake_case : Optional[int] = num_labels
        snake_case : str = num_choices
        snake_case : Dict = scope

    def lowerCAmelCase( self : Union[str, Any] ):
        """Create random input ids, masks and labels plus a config."""
        snake_case : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        snake_case : List[str] = None
        if self.use_input_mask:
            snake_case : List[str] = random_attention_mask([self.batch_size, self.seq_length] )

        snake_case : List[Any] = None
        if self.use_token_type_ids:
            snake_case : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        snake_case : List[Any] = None
        snake_case : Optional[Any] = None
        snake_case : Any = None
        if self.use_labels:
            snake_case : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            snake_case : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            snake_case : int = ids_tensor([self.batch_size] , self.num_choices )

        snake_case : Any = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def lowerCAmelCase( self : Union[str, Any] ):
        """Return a small LlamaConfig built from the recorded dimensions."""
        return LlamaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , )

    def lowerCAmelCase( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str ):
        """Run LlamaModel forward and check the last_hidden_state shape."""
        snake_case : Union[str, Any] = LlamaModel(config=UpperCAmelCase__ )
        model.to(UpperCAmelCase__ )
        model.eval()
        snake_case : Optional[int] = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
        snake_case : Optional[int] = model(UpperCAmelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def lowerCAmelCase( self : Tuple , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int , ):
        """Run LlamaModel as a decoder with cross-attention inputs."""
        snake_case : Optional[int] = True
        snake_case : List[Any] = LlamaModel(UpperCAmelCase__ )
        model.to(UpperCAmelCase__ )
        model.eval()
        snake_case : Optional[Any] = model(
            UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , )
        snake_case : Optional[int] = model(
            UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , )
        snake_case : List[str] = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def lowerCAmelCase( self : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[int] , ):
        """Run LlamaForCausalLM with labels and check the logits shape."""
        snake_case : List[str] = LlamaForCausalLM(config=UpperCAmelCase__ )
        model.to(UpperCAmelCase__ )
        model.eval()
        snake_case : Any = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def lowerCAmelCase( self : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Tuple , ):
        """Check that cached (past_key_values) decoding matches a full forward."""
        snake_case : str = True
        snake_case : Optional[int] = True
        snake_case : Optional[int] = LlamaForCausalLM(config=UpperCAmelCase__ )
        model.to(UpperCAmelCase__ )
        model.eval()

        # first forward pass
        snake_case : str = model(
            UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , use_cache=UpperCAmelCase__ , )
        snake_case : int = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        snake_case : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size )
        snake_case : List[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )

        # append to next input_ids and
        snake_case : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
        snake_case : List[Any] = torch.cat([input_mask, next_mask] , dim=-1 )

        snake_case : List[Any] = model(
            UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )['''hidden_states'''][0]
        snake_case : int = model(
            UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )['''hidden_states'''][0]

        # select random slice
        snake_case : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        snake_case : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach()
        snake_case : int = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-3 ) )

    def lowerCAmelCase( self : Tuple ):
        """Repack prepare_config_and_inputs output as (config, inputs_dict)."""
        snake_case : Any = self.prepare_config_and_inputs()
        (
            snake_case
        ) : Tuple = config_and_inputs
        snake_case : Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class a_ ( a , a , a , unittest.TestCase ):
    """Model test suite for Llama (common model/generation/pipeline mixins).

    NOTE(review): obfuscation damage -- the test methods all share the name
    ``lowerCAmelCase`` (only the last binding survives on the class, so most
    of these never run under unittest), and several ``snake_case`` locals
    shadow values that are read later under other names.  Confirm against
    the upstream ``transformers`` Llama test suite.
    """

    A__ : Optional[Any] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    A__ : Any = (LlamaForCausalLM,) if is_torch_available() else ()
    A__ : Union[str, Any] = (
        {
            'feature-extraction': LlamaModel,
            'text-classification': LlamaForSequenceClassification,
            'text-generation': LlamaForCausalLM,
            'zero-shot': LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    A__ : List[str] = False
    A__ : Tuple = False

    def lowerCAmelCase( self : Optional[Any] ):
        """Create the model tester and a ConfigTester for LlamaConfig."""
        snake_case : Dict = LlamaModelTester(self )
        snake_case : Optional[int] = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37 )

    def lowerCAmelCase( self : int ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def lowerCAmelCase( self : Optional[int] ):
        """Smoke-test the base model forward pass."""
        snake_case : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCAmelCase__ )

    def lowerCAmelCase( self : str ):
        """Exercise the model under each position-embedding type."""
        snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            snake_case : Union[str, Any] = type
            self.model_tester.create_and_check_model(*UpperCAmelCase__ )

    def lowerCAmelCase( self : Optional[int] ):
        """Check sequence-classification logits shape (regression labels)."""
        snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case : List[str] = 3
        snake_case : Dict = input_dict['''input_ids''']
        snake_case : int = input_ids.ne(1 ).to(UpperCAmelCase__ )
        snake_case : List[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        snake_case : Dict = LlamaForSequenceClassification(UpperCAmelCase__ )
        model.to(UpperCAmelCase__ )
        model.eval()
        snake_case : Union[str, Any] = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def lowerCAmelCase( self : Optional[Any] ):
        """Check sequence-classification logits shape (single-label mode)."""
        snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case : List[Any] = 3
        snake_case : Any = '''single_label_classification'''
        snake_case : Optional[int] = input_dict['''input_ids''']
        snake_case : Dict = input_ids.ne(1 ).to(UpperCAmelCase__ )
        snake_case : int = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        snake_case : int = LlamaForSequenceClassification(UpperCAmelCase__ )
        model.to(UpperCAmelCase__ )
        model.eval()
        snake_case : str = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def lowerCAmelCase( self : Tuple ):
        """Check sequence-classification logits shape (multi-label mode)."""
        snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case : Tuple = 3
        snake_case : Dict = '''multi_label_classification'''
        snake_case : Optional[int] = input_dict['''input_ids''']
        snake_case : Optional[Any] = input_ids.ne(1 ).to(UpperCAmelCase__ )
        snake_case : Tuple = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        snake_case : Dict = LlamaForSequenceClassification(UpperCAmelCase__ )
        model.to(UpperCAmelCase__ )
        model.eval()
        snake_case : Union[str, Any] = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    @unittest.skip('''LLaMA buffers include complex numbers, which breaks this test''' )
    def lowerCAmelCase( self : Optional[int] ):
        """Intentionally skipped (complex-number buffers)."""
        pass

    @parameterized.expand([('''linear''',), ('''dynamic''',)] )
    def lowerCAmelCase( self : Any , UpperCAmelCase__ : Optional[int] ):
        """Compare outputs of an unscaled model vs a RoPE-scaled model."""
        snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case : List[str] = ids_tensor([1, 10] , config.vocab_size )
        snake_case : Tuple = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )

        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        snake_case : Dict = LlamaModel(UpperCAmelCase__ )
        original_model.to(UpperCAmelCase__ )
        original_model.eval()
        snake_case : Dict = original_model(UpperCAmelCase__ ).last_hidden_state
        snake_case : Union[str, Any] = original_model(UpperCAmelCase__ ).last_hidden_state

        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        snake_case : List[Any] = {'''type''': scaling_type, '''factor''': 10.0}
        snake_case : int = LlamaModel(UpperCAmelCase__ )
        scaled_model.to(UpperCAmelCase__ )
        scaled_model.eval()
        snake_case : Optional[Any] = scaled_model(UpperCAmelCase__ ).last_hidden_state
        snake_case : List[Any] = scaled_model(UpperCAmelCase__ ).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-5 ) )
        else:
            self.assertFalse(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-5 ) )

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-5 ) )
@require_torch
class a_ ( unittest.TestCase ):
    """Slow integration tests pinning Llama-2 checkpoint logits/generations.

    NOTE(review): all methods share the obfuscated name ``lowerCAmelCase``
    (only the last binding survives under unittest discovery), and several
    ``snake_case`` locals shadow values read later under other names
    (``out``, ``model``, ``tokenizer``...).  Every test is also explicitly
    skipped via ``@unittest.skip``.
    """

    @unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' )
    @slow
    def lowerCAmelCase( self : int ):
        """Pin expected logits of the Llama-2 7B checkpoint."""
        snake_case : Optional[int] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
        snake_case : int = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-7b-hf''' , device_map='''auto''' )
        snake_case : List[str] = model(torch.tensor([input_ids] ) )
        # Expected mean on dim = -1
        snake_case : Optional[Any] = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
        torch.testing.assert_close(out.mean(-1 ) , UpperCAmelCase__ , atol=1e-2 , rtol=1e-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        snake_case : List[Any] = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] , UpperCAmelCase__ , atol=1e-5 , rtol=1e-5 )

    @unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' )
    @slow
    def lowerCAmelCase( self : List[Any] ):
        """Pin expected logits of the Llama-2 13B checkpoint."""
        snake_case : Optional[Any] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
        snake_case : List[str] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-hf''' , device_map='''auto''' )
        snake_case : Any = model(torch.tensor(UpperCAmelCase__ ) )
        # Expected mean on dim = -1
        snake_case : Dict = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
        torch.testing.assert_close(out.mean(-1 ) , UpperCAmelCase__ , atol=1e-2 , rtol=1e-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        snake_case : List[Any] = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] , UpperCAmelCase__ , atol=1e-5 , rtol=1e-5 )

    @unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' )
    @slow
    def lowerCAmelCase( self : Optional[Any] ):
        """Pin expected logits of the Llama-2 13B chat checkpoint."""
        snake_case : Optional[Any] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
        snake_case : List[str] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' , device_map='''auto''' )
        snake_case : str = model(torch.tensor(UpperCAmelCase__ ) )
        # Expected mean on dim = -1
        snake_case : Optional[int] = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
        torch.testing.assert_close(out.mean(-1 ) , UpperCAmelCase__ , atol=1e-2 , rtol=1e-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        snake_case : Optional[Any] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
        # fmt: on
        torch.testing.assert_close(out.mean(-1 ) , UpperCAmelCase__ , atol=1e-2 , rtol=1e-2 )

    @unittest.skip(
        '''Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test''' )
    @slow
    def lowerCAmelCase( self : Dict ):
        """Pin expected logits of the Llama-2 70B checkpoint."""
        snake_case : List[Any] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
        snake_case : List[Any] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-70b-hf''' , device_map='''auto''' )
        snake_case : Tuple = model(torch.tensor(UpperCAmelCase__ ) )

        snake_case : Union[str, Any] = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa )
        torch.testing.assert_close(out.mean(-1 ) , UpperCAmelCase__ , atol=1e-2 , rtol=1e-2 )
        # fmt: off
        snake_case : Optional[int] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] , UpperCAmelCase__ , atol=1e-5 , rtol=1e-5 )

    @unittest.skip('''Model is curently gated''' )
    @slow
    def lowerCAmelCase( self : Optional[int] ):
        """Pin the greedy generation of the Llama-2 13B chat checkpoint."""
        snake_case : Optional[int] = '''Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'''
        snake_case : List[Any] = '''Simply put, the theory of relativity states that '''
        snake_case : int = LlamaTokenizer.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' )
        snake_case : List[str] = tokenizer.encode(UpperCAmelCase__ , return_tensors='''pt''' )
        snake_case : List[Any] = LlamaForCausalLM.from_pretrained(
            '''meta-llama/Llama-2-13b-chat-hf''' , device_map='''sequential''' , use_safetensors=UpperCAmelCase__ )

        # greedy generation outputs
        snake_case : str = model.generate(UpperCAmelCase__ , max_new_tokens=64 , top_p=UpperCAmelCase__ , temperature=1 , do_sample=UpperCAmelCase__ )
        snake_case : List[str] = tokenizer.decode(generated_ids[0] , skip_special_tokens=UpperCAmelCase__ )
        self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
| 705 |
def a_(p: int) -> bool:
    """Lucas-Lehmer primality test for the Mersenne number ``2**p - 1``.

    Returns True iff ``2**p - 1`` is prime.  ``p == 2`` is the base case
    (M2 = 3 is prime); ``p < 2`` raises ValueError.

    Fixes versus the original block: the body read the undefined names
    ``p``, ``s`` and ``m`` (the parameter was ``__magic_name__`` and the
    values were bound to throw-away ``snake_case`` locals), and the
    ``__main__`` guard called an undefined ``lucas_lehmer_test``; both are
    repaired while keeping the public name ``a_``.

    >>> a_(7)
    True
    >>> a_(11)
    False
    """
    if p < 2:
        raise ValueError('''p should not be less than 2!''' )
    elif p == 2:
        return True

    # Lucas-Lehmer sequence: s_0 = 4, s_{k+1} = s_k**2 - 2 (mod 2**p - 1);
    # M_p = 2**p - 1 is prime iff s_{p-2} == 0.
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2 ):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    print(a_(7))
    print(a_(11))
| 84 | 0 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class a_ ( a ):
    """Flax ControlNet output: the per-resolution down-block residuals and
    the mid-block residual, both as JAX arrays.

    NOTE(review): both fields share the obfuscated name ``A__`` (only the
    last annotation survives); upstream these are
    ``down_block_res_samples`` and ``mid_block_res_sample``.
    """

    A__ : jnp.ndarray
    A__ : jnp.ndarray
class a_ ( nn.Module ):
    """Flax module embedding a conditioning image into the UNet's feature space
    via a stack of 3x3 convolutions with stride-2 downsampling between stages.

    NOTE(review): obfuscation damage -- the three class attributes all share
    the name ``A__``, the ``snake_case`` locals in setup were presumably
    ``self.conv_in`` / ``self.blocks`` / ``self.conv_out`` assignments, and
    ``UpperCAmelCase__`` is read in ``setup`` without ever being bound (the
    per-stage channel counts were dropped).  This block raises NameError as
    written; confirm against the upstream diffusers implementation.
    """

    A__ : int
    A__ : Tuple[int] = (16, 32, 96, 256)
    A__ : jnp.dtype = jnp.floataa

    def lowerCAmelCase( self : List[Any] ):
        """Build the input conv, intermediate conv stack, and zero-init output conv."""
        snake_case : List[str] = nn.Conv(
            self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )

        snake_case : Union[str, Any] = []
        for i in range(len(self.block_out_channels ) - 1 ):
            snake_case : Any = self.block_out_channels[i]
            snake_case : List[Any] = self.block_out_channels[i + 1]
            snake_case : Any = nn.Conv(
                UpperCAmelCase__ , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
            blocks.append(UpperCAmelCase__ )
            # stride-2 conv downsamples between channel stages
            snake_case : Optional[Any] = nn.Conv(
                UpperCAmelCase__ , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
            blocks.append(UpperCAmelCase__ )
        snake_case : Union[str, Any] = blocks

        # zero-initialized so the ControlNet starts as an identity contribution
        snake_case : Any = nn.Conv(
            self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )

    def __call__( self : int , UpperCAmelCase__ : Union[str, Any] ):
        """Embed a conditioning image: conv_in -> silu -> conv stack -> conv_out."""
        snake_case : Optional[int] = self.conv_in(UpperCAmelCase__ )
        snake_case : Dict = nn.silu(UpperCAmelCase__ )

        for block in self.blocks:
            snake_case : Any = block(UpperCAmelCase__ )
            snake_case : Dict = nn.silu(UpperCAmelCase__ )

        snake_case : List[str] = self.conv_out(UpperCAmelCase__ )
        return embedding
@flax_register_to_config
class a_ ( nn.Module , a , a ):
    """Flax ControlNet-style conditional UNet encoder.

    The field defaults below are registered as the model config by
    ``@flax_register_to_config``.

    NOTE(review): this file is machine-mangled — every assignment target was
    renamed to ``snake_case`` while later uses keep the original identifiers
    (e.g. ``params_rng``, ``sample``, ``down_block_res_samples``), and
    ``__call__`` declares several parameters with the identical name
    ``UpperCAmelCase__``, which is a SyntaxError in Python.  The original
    identifiers must be restored before this class can run; the code is kept
    byte-identical here for review.
    """
    # Config fields (names erased by mangling).  The methods below read:
    # sample_size, in_channels, down_block_types, only_cross_attention,
    # block_out_channels, layers_per_block, attention_head_dim,
    # num_attention_heads, cross_attention_dim, dropout, use_linear_projection,
    # dtype, flip_sin_to_cos, freq_shift (via self.config),
    # controlnet_conditioning_channel_order, conditioning_embedding_out_channels.
    # The mapping of each field below to those names is inferred from the
    # defaults — TODO confirm against the upstream class.
    A__ : int = 32
    A__ : int = 4
    A__ : Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    A__ : Union[bool, Tuple[bool]] = False
    A__ : Tuple[int] = (320, 640, 1280, 1280)
    A__ : int = 2
    A__ : Union[int, Tuple[int]] = 8
    A__ : Optional[Union[int, Tuple[int]]] = None
    A__ : int = 1280
    A__ : float = 0.0
    A__ : bool = False
    A__ : jnp.dtype = jnp.floataa
    A__ : bool = True
    A__ : int = 0
    A__ : str = "rgb"
    A__ : Tuple[int] = (16, 32, 96, 256)
    def lowerCAmelCase( self : Tuple , UpperCAmelCase__ : jax.random.KeyArray ):
        """Initialise the module from dummy inputs and return the "params" tree."""
        # Dummy latent sample, timestep, encoder hidden states, and a
        # conditioning image at 8x the latent resolution.
        snake_case : Optional[Any] = (1, self.in_channels, self.sample_size, self.sample_size)
        snake_case : List[str] = jnp.zeros(UpperCAmelCase__ , dtype=jnp.floataa )
        snake_case : Tuple = jnp.ones((1,) , dtype=jnp.intaa )
        snake_case : Union[str, Any] = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
        snake_case : Any = (1, 3, self.sample_size * 8, self.sample_size * 8)
        snake_case : Tuple = jnp.zeros(UpperCAmelCase__ , dtype=jnp.floataa )
        # Split the incoming key into parameter and dropout streams.
        snake_case : Union[str, Any] = jax.random.split(UpperCAmelCase__ )
        snake_case : Any = {'''params''': params_rng, '''dropout''': dropout_rng}
        return self.init(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )["params"]
    def lowerCAmelCase( self : str ):
        """Flax ``setup``: build conv_in, the time/conditioning embeddings, the
        down blocks with one zero-initialized 1x1 conv per resolution, and the
        mid block plus its zero conv."""
        snake_case : Optional[Any] = self.block_out_channels
        snake_case : int = block_out_channels[0] * 4
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        snake_case : int = self.num_attention_heads or self.attention_head_dim
        # input
        snake_case : Any = nn.Conv(
            block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        # time
        snake_case : Dict = FlaxTimesteps(
            block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
        snake_case : int = FlaxTimestepEmbedding(UpperCAmelCase__ , dtype=self.dtype )
        snake_case : List[Any] = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
        # Broadcast scalar settings to one entry per down block.
        snake_case : Any = self.only_cross_attention
        if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
            snake_case : Union[str, Any] = (only_cross_attention,) * len(self.down_block_types )
        if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
            snake_case : Optional[int] = (num_attention_heads,) * len(self.down_block_types )
        # down
        snake_case : Any = []
        snake_case : List[str] = []
        snake_case : Any = block_out_channels[0]
        # Zero-initialized 1x1 convs: the ControlNet branch starts as identity.
        snake_case : int = nn.Conv(
            UpperCAmelCase__ , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
        controlnet_down_blocks.append(UpperCAmelCase__ )
        for i, down_block_type in enumerate(self.down_block_types ):
            snake_case : Union[str, Any] = output_channel
            snake_case : Any = block_out_channels[i]
            snake_case : Dict = i == len(UpperCAmelCase__ ) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                snake_case : Tuple = FlaxCrossAttnDownBlockaD(
                    in_channels=UpperCAmelCase__ , out_channels=UpperCAmelCase__ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
            else:
                snake_case : int = FlaxDownBlockaD(
                    in_channels=UpperCAmelCase__ , out_channels=UpperCAmelCase__ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
            down_blocks.append(UpperCAmelCase__ )
            # One zero conv per layer of the block...
            for _ in range(self.layers_per_block ):
                snake_case : Dict = nn.Conv(
                    UpperCAmelCase__ , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(UpperCAmelCase__ )
            # ...plus one for the downsample output of every non-final block.
            if not is_final_block:
                snake_case : Optional[int] = nn.Conv(
                    UpperCAmelCase__ , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(UpperCAmelCase__ )
        snake_case : str = down_blocks
        snake_case : List[str] = controlnet_down_blocks
        # mid
        snake_case : str = block_out_channels[-1]
        snake_case : str = FlaxUNetMidBlockaDCrossAttn(
            in_channels=UpperCAmelCase__ , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
        snake_case : Tuple = nn.Conv(
            UpperCAmelCase__ , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
    def __call__( self : Tuple , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : float = 1.0 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : bool = False , ):
        """Forward pass: embed time and conditioning, run down/mid blocks, apply
        the zero-conv controlnet blocks and scale the residuals.

        NOTE(review): the duplicate ``UpperCAmelCase__`` parameter names above
        are a SyntaxError; the originals (sample, timesteps,
        encoder_hidden_states, controlnet_cond, conditioning_scale,
        return_dict, train) must be restored.
        """
        snake_case : Tuple = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            snake_case : Dict = jnp.flip(UpperCAmelCase__ , axis=1 )
        # 1. time
        if not isinstance(UpperCAmelCase__ , jnp.ndarray ):
            snake_case : Optional[int] = jnp.array([timesteps] , dtype=jnp.intaa )
        elif isinstance(UpperCAmelCase__ , jnp.ndarray ) and len(timesteps.shape ) == 0:
            snake_case : Union[str, Any] = timesteps.astype(dtype=jnp.floataa )
            snake_case : List[Any] = jnp.expand_dims(UpperCAmelCase__ , 0 )
        snake_case : Optional[Any] = self.time_proj(UpperCAmelCase__ )
        snake_case : Union[str, Any] = self.time_embedding(UpperCAmelCase__ )
        # 2. pre-process (NCHW -> NHWC for the convolutions)
        snake_case : Union[str, Any] = jnp.transpose(UpperCAmelCase__ , (0, 2, 3, 1) )
        snake_case : Optional[Any] = self.conv_in(UpperCAmelCase__ )
        snake_case : List[Any] = jnp.transpose(UpperCAmelCase__ , (0, 2, 3, 1) )
        snake_case : Optional[int] = self.controlnet_cond_embedding(UpperCAmelCase__ )
        sample += controlnet_cond
        # 3. down
        snake_case : Optional[Any] = (sample,)
        for down_block in self.down_blocks:
            if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
                snake_case : Optional[int] = down_block(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , deterministic=not train )
            else:
                snake_case : Union[str, Any] = down_block(UpperCAmelCase__ , UpperCAmelCase__ , deterministic=not train )
            down_block_res_samples += res_samples
        # 4. mid
        snake_case : Dict = self.mid_block(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , deterministic=not train )
        # 5. contronet blocks
        snake_case : List[str] = ()
        for down_block_res_sample, controlnet_block in zip(UpperCAmelCase__ , self.controlnet_down_blocks ):
            snake_case : Union[str, Any] = controlnet_block(UpperCAmelCase__ )
            controlnet_down_block_res_samples += (down_block_res_sample,)
        snake_case : Optional[Any] = controlnet_down_block_res_samples
        snake_case : Dict = self.controlnet_mid_block(UpperCAmelCase__ )
        # 6. scaling
        snake_case : Tuple = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale
        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)
        return FlaxControlNetOutput(
            down_block_res_samples=UpperCAmelCase__ , mid_block_res_sample=UpperCAmelCase__ )
| 706 |
from sklearn.metrics import fa_score
import datasets
_a : List[str] = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'
_a : Dict = '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. 
This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, 
references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n'
_a : List[Any] = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
    """F1 metric backed by scikit-learn's f1 score (imported as ``fa_score``)."""
    def lowerCAmelCase( self : Any ):
        """Return the :class:`datasets.MetricInfo` describing this metric.

        Multilabel configs take sequences of ints per example; every other
        config takes a single int label per example.
        """
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
                    '''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
                }
                if self.config_name == '''multilabel'''
                else {
                    '''predictions''': datasets.Value('''int32''' ),
                    '''references''': datasets.Value('''int32''' ),
                } ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
    def lowerCAmelCase( self : List[Any] , predictions , references , labels=None , pos_label=1 , average="binary" , sample_weight=None ):
        """Compute the F1 score and return it as ``{"f1": value}``.

        A scalar score is converted to ``float``; a per-class array (when
        ``average=None``) is returned as-is.

        Fixes the original signature, which declared every parameter with the
        same name (``UpperCAmelCase__`` — a SyntaxError), and the original
        body, which discarded the score into a throwaway local before reading
        the unbound name ``score``.
        """
        score = fa_score(
            references , predictions , labels=labels , pos_label=pos_label , average=average , sample_weight=sample_weight )
        return {"f1": float(score ) if score.size == 1 else score}
| 84 | 0 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a : Optional[Any] = logging.get_logger(__name__)
_a : List[Any] = {
'google/efficientnet-b7': 'https://huggingface.co/google/efficientnet-b7/resolve/main/config.json',
}
class a_ ( a ):
    """Configuration class for an EfficientNet model.

    The defaults reproduce the EfficientNet-b7 architecture.

    Fixes the original ``__init__``, which declared every parameter with the
    same mangled name (``UpperCAmelCase__`` — a SyntaxError) and assigned each
    value to a throwaway local instead of an attribute on ``self``; the
    canonical upstream parameter/attribute names are restored.

    NOTE: the list defaults are kept as lists (not tuples) for compatibility
    with serialized configs and are treated as read-only — do not mutate them.
    """
    A__ : List[str] = 'efficientnet'
    def __init__(
        self ,
        num_channels : int = 3 ,
        image_size : int = 600 ,
        width_coefficient : float = 2.0 ,
        depth_coefficient : float = 3.1 ,
        depth_divisor : int = 8 ,
        kernel_sizes : List[int] = [3, 3, 5, 3, 5, 5, 3] ,
        in_channels : List[int] = [32, 16, 24, 40, 80, 112, 192] ,
        out_channels : List[int] = [16, 24, 40, 80, 112, 192, 320] ,
        depthwise_padding : List[int] = [] ,
        strides : List[int] = [1, 2, 2, 2, 1, 2, 1] ,
        num_block_repeats : List[int] = [1, 2, 2, 3, 3, 4, 1] ,
        expand_ratios : List[int] = [1, 6, 6, 6, 6, 6, 6] ,
        squeeze_expansion_ratio : float = 0.25 ,
        hidden_act : str = "swish" ,
        hidden_dim : int = 2_560 ,
        pooling_type : str = "mean" ,
        initializer_range : float = 0.02 ,
        batch_norm_eps : float = 0.001 ,
        batch_norm_momentum : float = 0.99 ,
        dropout_rate : float = 0.5 ,
        drop_connect_rate : float = 0.2 ,
        **kwargs ,
    ):
        """Store all architecture hyper-parameters on the config instance."""
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        # Factor of 4 kept from the original code; presumably four sublayers
        # per block repeat — TODO confirm against upstream.
        self.num_hidden_layers = sum(num_block_repeats ) * 4
class a_ ( a ):
    """ONNX export configuration: declares the exported model's inputs and the
    absolute tolerance used when validating exported outputs."""
    A__ : List[str] = version.parse('1.11' )
    @property
    def lowerCAmelCase( self : str ):
        """Input name -> axis-name mapping expected by the ONNX exporter."""
        pixel_axes = {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}
        return OrderedDict([('''pixel_values''', pixel_axes )] )
    @property
    def lowerCAmelCase( self : Tuple ):
        """Absolute tolerance for output validation after export."""
        return 1e-5
| 707 |
def a_ ( __magic_name__ ) -> int:
    """Return the greatest number obtainable by deleting exactly one digit.

    The sign of the input is ignored: the digits of ``abs(__magic_name__)``
    are used.  Single-digit (and zero) inputs raise ``ValueError``, because
    removing their only digit leaves an empty number.

    Raises:
        TypeError: if ``__magic_name__`` is not an integer.

    >>> a_(152)
    52
    >>> a_(-290)
    90
    >>> a_(100)
    10
    """
    # The original checked isinstance(x, x), popped the number itself instead
    # of the index, and joined the wrong name; all three are fixed below.
    if not isinstance(__magic_name__ , int ):
        raise TypeError('''only integers accepted as input''' )
    digits = str(abs(__magic_name__ ) )
    # One candidate per position: a copy of the digit list with that position removed.
    num_transpositions = [list(digits ) for _ in range(len(digits ) )]
    for index in range(len(digits ) ):
        num_transpositions[index].pop(index )
    return max(
        int(''.join(transposition ) ) for transposition in num_transpositions )
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed directly.
    import doctest

    doctest.testmod()
| 84 | 0 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
_a : Tuple = logging.get_logger(__name__)
# General docstring
_a : List[Any] = 'RegNetConfig'
# Base docstring
_a : int = 'facebook/regnet-y-040'
_a : int = [1, 1_088, 7, 7]
# Image classification docstring
_a : Tuple = 'facebook/regnet-y-040'
_a : int = 'tabby, tabby cat'
_a : Union[str, Any] = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class a_ ( tf.keras.layers.Layer ):
    """Conv -> batch-norm -> activation block with explicit zero padding.

    NOTE(review): ``__init__`` declares several parameters with the identical
    name ``UpperCAmelCase__`` (a SyntaxError), and local assignment targets are
    mangled to ``snake_case`` while later code reads the original attribute
    names — the original identifiers must be restored before use.
    """
    def __init__( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : Optional[str] = "relu" , **UpperCAmelCase__ : Union[str, Any] , ):
        """Build the padding, convolution, normalization and activation sublayers."""
        super().__init__(**UpperCAmelCase__ )
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        snake_case : Any = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
        snake_case : Any = tf.keras.layers.ConvaD(
            filters=UpperCAmelCase__ , kernel_size=UpperCAmelCase__ , strides=UpperCAmelCase__ , padding='''VALID''' , groups=UpperCAmelCase__ , use_bias=UpperCAmelCase__ , name='''convolution''' , )
        snake_case : int = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )
        snake_case : Union[str, Any] = ACTaFN[activation] if activation is not None else tf.identity
    def lowerCAmelCase( self : Dict , UpperCAmelCase__ : Union[str, Any] ):
        """Apply pad -> conv -> batch-norm -> activation to the input."""
        snake_case : Any = self.convolution(self.padding(UpperCAmelCase__ ) )
        snake_case : str = self.normalization(UpperCAmelCase__ )
        snake_case : Tuple = self.activation(UpperCAmelCase__ )
        return hidden_state
class a_ ( tf.keras.layers.Layer ):
    """Stem embedding: validates the channel count, converts NCHW -> NHWC and
    applies the stem convolution.

    NOTE(review): parameter and local names are machine-mangled (duplicate
    ``UpperCAmelCase__`` parameters are a SyntaxError); restore the originals
    before use.
    """
    def __init__( self : Any , UpperCAmelCase__ : RegNetConfig , **UpperCAmelCase__ : int ):
        """Create the stem conv layer from the model config."""
        super().__init__(**UpperCAmelCase__ )
        snake_case : Union[str, Any] = config.num_channels
        snake_case : List[str] = TFRegNetConvLayer(
            out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , )
    def lowerCAmelCase( self : Any , UpperCAmelCase__ : Tuple ):
        """Check channels, transpose NCHW -> NHWC, then embed the pixels."""
        snake_case : List[str] = shape_list(UpperCAmelCase__ )[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        snake_case : Tuple = tf.transpose(UpperCAmelCase__ , perm=(0, 2, 3, 1) )
        snake_case : str = self.embedder(UpperCAmelCase__ )
        return hidden_state
class a_ ( tf.keras.layers.Layer ):
    """1x1 conv + batch-norm shortcut used to match channels/stride on the
    residual branch.

    NOTE(review): duplicate ``UpperCAmelCase__`` parameter names in
    ``__init__`` are a SyntaxError; restore the original names before use.
    """
    def __init__( self : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 2 , **UpperCAmelCase__ : Dict ):
        """Build the projection convolution and its normalization."""
        super().__init__(**UpperCAmelCase__ )
        snake_case : Any = tf.keras.layers.ConvaD(
            filters=UpperCAmelCase__ , kernel_size=1 , strides=UpperCAmelCase__ , use_bias=UpperCAmelCase__ , name='''convolution''' )
        snake_case : Union[str, Any] = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )
    def lowerCAmelCase( self : Optional[int] , UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : bool = False ):
        """Project the input and normalize it (training flag forwarded to BN)."""
        return self.normalization(self.convolution(UpperCAmelCase__ ) , training=UpperCAmelCase__ )
class a_ ( tf.keras.layers.Layer ):
    """Squeeze-and-excitation block: global-pool, 1x1 relu bottleneck, 1x1
    sigmoid gate, then rescale the input feature map.

    NOTE(review): duplicate ``UpperCAmelCase__`` parameter names and mangled
    local names must be restored before use.
    """
    def __init__( self : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , **UpperCAmelCase__ : Optional[Any] ):
        """Build the pooler and the two 1x1 attention convolutions."""
        super().__init__(**UpperCAmelCase__ )
        snake_case : Dict = tf.keras.layers.GlobalAveragePoolingaD(keepdims=UpperCAmelCase__ , name='''pooler''' )
        snake_case : List[str] = [
            tf.keras.layers.ConvaD(filters=UpperCAmelCase__ , kernel_size=1 , activation='''relu''' , name='''attention.0''' ),
            tf.keras.layers.ConvaD(filters=UpperCAmelCase__ , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ),
        ]
    def lowerCAmelCase( self : Optional[int] , UpperCAmelCase__ : Union[str, Any] ):
        """Compute channel-attention weights and scale the input by them."""
        snake_case : Dict = self.pooler(UpperCAmelCase__ )
        for layer_module in self.attention:
            snake_case : int = layer_module(UpperCAmelCase__ )
        snake_case : Optional[int] = hidden_state * pooled
        return hidden_state
class a_ ( tf.keras.layers.Layer ):
    """RegNet "X" residual layer: 1x1 reduce -> 3x3 grouped conv -> 1x1
    expand, plus a (possibly projected) shortcut and a final activation.

    NOTE(review): duplicate ``UpperCAmelCase__`` parameter names in
    ``__init__`` are a SyntaxError; restore the original names before use.
    """
    def __init__( self : List[str] , UpperCAmelCase__ : RegNetConfig , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 1 , **UpperCAmelCase__ : Optional[int] ):
        """Build the three conv stages and the shortcut branch."""
        super().__init__(**UpperCAmelCase__ )
        # Project the shortcut only when shape changes (channels or stride).
        snake_case : Tuple = in_channels != out_channels or stride != 1
        snake_case : List[str] = max(1 , out_channels // config.groups_width )
        snake_case : Tuple = (
            TFRegNetShortCut(UpperCAmelCase__ , stride=UpperCAmelCase__ , name='''shortcut''' )
            if should_apply_shortcut
            else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        snake_case : Dict = [
            TFRegNetConvLayer(UpperCAmelCase__ , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
            TFRegNetConvLayer(
                UpperCAmelCase__ , stride=UpperCAmelCase__ , groups=UpperCAmelCase__ , activation=config.hidden_act , name='''layer.1''' ),
            TFRegNetConvLayer(UpperCAmelCase__ , kernel_size=1 , activation=UpperCAmelCase__ , name='''layer.2''' ),
        ]
        snake_case : int = ACTaFN[config.hidden_act]
    def lowerCAmelCase( self : Tuple , UpperCAmelCase__ : Dict ):
        """Run the conv stack, add the shortcut, apply the final activation."""
        snake_case : Optional[int] = hidden_state
        for layer_module in self.layers:
            snake_case : List[Any] = layer_module(UpperCAmelCase__ )
        snake_case : Dict = self.shortcut(UpperCAmelCase__ )
        hidden_state += residual
        snake_case : Optional[int] = self.activation(UpperCAmelCase__ )
        return hidden_state
class a_ ( tf.keras.layers.Layer ):
    """RegNet "Y" residual layer: like the X layer but with a
    squeeze-and-excitation block between the grouped conv and the expansion.

    NOTE(review): duplicate ``UpperCAmelCase__`` parameter names in
    ``__init__`` are a SyntaxError; restore the original names before use.
    """
    def __init__( self : int , UpperCAmelCase__ : RegNetConfig , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 1 , **UpperCAmelCase__ : Dict ):
        """Build the conv stages (including SE) and the shortcut branch."""
        super().__init__(**UpperCAmelCase__ )
        # Project the shortcut only when shape changes (channels or stride).
        snake_case : int = in_channels != out_channels or stride != 1
        snake_case : str = max(1 , out_channels // config.groups_width )
        snake_case : Union[str, Any] = (
            TFRegNetShortCut(UpperCAmelCase__ , stride=UpperCAmelCase__ , name='''shortcut''' )
            if should_apply_shortcut
            else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
        )
        snake_case : str = [
            TFRegNetConvLayer(UpperCAmelCase__ , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
            TFRegNetConvLayer(
                UpperCAmelCase__ , stride=UpperCAmelCase__ , groups=UpperCAmelCase__ , activation=config.hidden_act , name='''layer.1''' ),
            TFRegNetSELayer(UpperCAmelCase__ , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ),
            TFRegNetConvLayer(UpperCAmelCase__ , kernel_size=1 , activation=UpperCAmelCase__ , name='''layer.3''' ),
        ]
        snake_case : Tuple = ACTaFN[config.hidden_act]
    def lowerCAmelCase( self : List[str] , UpperCAmelCase__ : Optional[int] ):
        """Run the conv/SE stack, add the shortcut, apply the final activation."""
        snake_case : Union[str, Any] = hidden_state
        for layer_module in self.layers:
            snake_case : Any = layer_module(UpperCAmelCase__ )
        snake_case : Optional[Any] = self.shortcut(UpperCAmelCase__ )
        hidden_state += residual
        snake_case : str = self.activation(UpperCAmelCase__ )
        return hidden_state
class a_ ( tf.keras.layers.Layer ):
    """One RegNet stage: a stack of X or Y layers (chosen from
    ``config.layer_type``); the first layer carries the downsampling stride.

    NOTE(review): duplicate ``UpperCAmelCase__`` parameter names in
    ``__init__`` are a SyntaxError; restore the original names before use.
    """
    def __init__( self : Optional[Any] , UpperCAmelCase__ : RegNetConfig , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , **UpperCAmelCase__ : Optional[int] ):
        """Pick the layer class and stack ``depth`` layers."""
        super().__init__(**UpperCAmelCase__ )
        snake_case : Dict = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer
        snake_case : Any = [
            # downsampling is done in the first layer with stride of 2
            layer(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , stride=UpperCAmelCase__ , name='''layers.0''' ),
            *[layer(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , name=F"layers.{i+1}" ) for i in range(depth - 1 )],
        ]
    def lowerCAmelCase( self : Dict , UpperCAmelCase__ : Union[str, Any] ):
        """Apply every layer of the stage in sequence."""
        for layer_module in self.layers:
            snake_case : Any = layer_module(UpperCAmelCase__ )
        return hidden_state
class a_ ( tf.keras.layers.Layer ):
    """The full stack of RegNet stages; optionally collects each stage's
    hidden state.

    NOTE(review): locals are mangled (``snake_case`` targets, original names
    read later); restore the original identifiers before use.
    """
    def __init__( self : Optional[Any] , UpperCAmelCase__ : RegNetConfig , **UpperCAmelCase__ : str ):
        """Build the stage list from ``config.hidden_sizes`` / ``config.depths``."""
        super().__init__(**UpperCAmelCase__ )
        snake_case : str = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                UpperCAmelCase__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) )
        # Remaining stages pair consecutive hidden sizes with their depths.
        snake_case : Tuple = zip(config.hidden_sizes , config.hidden_sizes[1:] )
        for i, ((in_channels, out_channels), depth) in enumerate(zip(UpperCAmelCase__ , config.depths[1:] ) ):
            self.stages.append(TFRegNetStage(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , depth=UpperCAmelCase__ , name=F"stages.{i+1}" ) )
    def lowerCAmelCase( self : List[Any] , UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = True ):
        """Run each stage; return the last hidden state (plus all states when
        ``output_hidden_states`` is set), as tuple or model-output object."""
        snake_case : Union[str, Any] = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                snake_case : List[str] = hidden_states + (hidden_state,)
            snake_case : List[Any] = stage_module(UpperCAmelCase__ )
        if output_hidden_states:
            snake_case : List[str] = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None )
        return TFBaseModelOutputWithNoAttention(last_hidden_state=UpperCAmelCase__ , hidden_states=UpperCAmelCase__ )
@keras_serializable
class a_ ( tf.keras.layers.Layer ):
    """Serializable main layer: embedder + encoder + global-average pooler.
    Spatial outputs are transposed back to NCHW for API uniformity.

    NOTE(review): duplicate ``UpperCAmelCase__`` parameter names and mangled
    local names must be restored before use.
    """
    A__ : List[str] = RegNetConfig
    def __init__( self : int , UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : List[Any] ):
        """Build embedder, encoder and pooler from the config."""
        super().__init__(**UpperCAmelCase__ )
        snake_case : str = config
        snake_case : Tuple = TFRegNetEmbeddings(UpperCAmelCase__ , name='''embedder''' )
        snake_case : int = TFRegNetEncoder(UpperCAmelCase__ , name='''encoder''' )
        snake_case : Any = tf.keras.layers.GlobalAveragePoolingaD(keepdims=UpperCAmelCase__ , name='''pooler''' )
    @unpack_inputs
    def lowerCAmelCase( self : List[str] , UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : bool = False , ):
        """Forward pass: embed -> encode -> pool, returning NCHW tensors."""
        snake_case : Optional[int] = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        snake_case : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
        snake_case : Optional[Any] = self.embedder(UpperCAmelCase__ , training=UpperCAmelCase__ )
        snake_case : List[Any] = self.encoder(
            UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , training=UpperCAmelCase__ )
        snake_case : int = encoder_outputs[0]
        snake_case : Tuple = self.pooler(UpperCAmelCase__ )
        # Change to NCHW output format have uniformity in the modules
        snake_case : Union[str, Any] = tf.transpose(UpperCAmelCase__ , perm=(0, 3, 1, 2) )
        snake_case : List[Any] = tf.transpose(UpperCAmelCase__ , perm=(0, 3, 1, 2) )
        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            snake_case : Tuple = tuple([tf.transpose(UpperCAmelCase__ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=UpperCAmelCase__ , pooler_output=UpperCAmelCase__ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class a_ ( a ):
    """Pretrained-model base: declares the config class, the base-model prefix
    and the dummy serving signature used to build/trace the model."""
    A__ : List[Any] = RegNetConfig
    A__ : List[Any] = 'regnet'
    A__ : List[Any] = 'pixel_values'
    @property
    def lowerCAmelCase( self : int ):
        """Dummy input spec: NCHW float pixel values at 224x224."""
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
_a : Optional[Any] = R'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
_a : Union[str, Any] = R'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    'The bare RegNet model outputting raw features without any specific head on top.' , a , )
class a_ ( a ):
    """Bare RegNet model returning raw (unheaded) backbone features.

    NOTE(review): duplicate ``UpperCAmelCase__`` parameter names (a
    SyntaxError) and mangled locals must be restored before use; the
    decorator arguments also reference the mangled ``UpperCAmelCase__`` name.
    """
    def __init__( self : List[Any] , UpperCAmelCase__ : RegNetConfig , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : int ):
        """Wrap the serializable main layer."""
        super().__init__(UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__ )
        snake_case : Dict = TFRegNetMainLayer(UpperCAmelCase__ , name='''regnet''' )
    @unpack_inputs
    @add_start_docstrings_to_model_forward(UpperCAmelCase__ )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCAmelCase__ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def lowerCAmelCase( self : Tuple , UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[Any]=False , ):
        """Forward the pixel values through the RegNet main layer."""
        snake_case : int = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        snake_case : Dict = return_dict if return_dict is not None else self.config.use_return_dict
        snake_case : List[Any] = self.regnet(
            pixel_values=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , training=UpperCAmelCase__ , )
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
    '\n    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ' , a , )
class a_ ( a , a ):
    """RegNet backbone with a flatten + dense image-classification head.

    NOTE(review): duplicate ``UpperCAmelCase__`` parameter names (a
    SyntaxError) and mangled locals must be restored before use.
    """
    def __init__( self : Optional[int] , UpperCAmelCase__ : RegNetConfig , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Optional[Any] ):
        """Build the backbone main layer and the classification head."""
        super().__init__(UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__ )
        snake_case : List[Any] = config.num_labels
        snake_case : List[Any] = TFRegNetMainLayer(UpperCAmelCase__ , name='''regnet''' )
        # classification head
        snake_case : Optional[Any] = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels , name='''classifier.1''' ) if config.num_labels > 0 else tf.identity,
        ]
    @unpack_inputs
    @add_start_docstrings_to_model_forward(UpperCAmelCase__ )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCAmelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def lowerCAmelCase( self : Dict , UpperCAmelCase__ : tf.Tensor = None , UpperCAmelCase__ : tf.Tensor = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : List[str]=False , ):
        """Classify: backbone -> pooled output -> flatten -> dense logits, with
        an optional loss when labels are given."""
        snake_case : Dict = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        snake_case : int = return_dict if return_dict is not None else self.config.use_return_dict
        snake_case : int = self.regnet(
            UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , training=UpperCAmelCase__ )
        snake_case : Union[str, Any] = outputs.pooler_output if return_dict else outputs[1]
        snake_case : Dict = self.classifier[0](UpperCAmelCase__ )
        snake_case : List[Any] = self.classifier[1](UpperCAmelCase__ )
        snake_case : Dict = None if labels is None else self.hf_compute_loss(labels=UpperCAmelCase__ , logits=UpperCAmelCase__ )
        if not return_dict:
            snake_case : List[str] = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=UpperCAmelCase__ , logits=UpperCAmelCase__ , hidden_states=outputs.hidden_states )
| 708 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class a_ :
def __init__( self : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any]=99 , UpperCAmelCase__ : Dict=13 , UpperCAmelCase__ : int=7 , UpperCAmelCase__ : Any=9 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Tuple=False , UpperCAmelCase__ : Tuple=32 , UpperCAmelCase__ : Dict=5 , UpperCAmelCase__ : Optional[int]=4 , UpperCAmelCase__ : List[Any]=37 , UpperCAmelCase__ : Union[str, Any]=8 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : str=0.002 , UpperCAmelCase__ : str=1 , UpperCAmelCase__ : Any=0 , UpperCAmelCase__ : Union[str, Any]=0 , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Optional[Any]=None , ):
"""simple docstring"""
snake_case : Union[str, Any] = parent
snake_case : Union[str, Any] = batch_size
snake_case : Any = encoder_seq_length
snake_case : str = decoder_seq_length
# For common tests
snake_case : Optional[int] = self.decoder_seq_length
snake_case : Optional[Any] = is_training
snake_case : List[Any] = use_attention_mask
snake_case : Union[str, Any] = use_labels
snake_case : Any = vocab_size
snake_case : Optional[int] = hidden_size
snake_case : List[str] = num_hidden_layers
snake_case : Union[str, Any] = num_attention_heads
snake_case : Any = d_ff
snake_case : Any = relative_attention_num_buckets
snake_case : Optional[Any] = dropout_rate
snake_case : int = initializer_factor
snake_case : Optional[Any] = eos_token_id
snake_case : Dict = pad_token_id
snake_case : Optional[Any] = decoder_start_token_id
snake_case : Union[str, Any] = None
snake_case : List[str] = decoder_layers
def lowerCAmelCase( self : Union[str, Any] ):
"""simple docstring"""
return TaConfig.from_pretrained('''google/umt5-base''' )
def lowerCAmelCase( self : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : List[Any]=None , ):
"""simple docstring"""
if attention_mask is None:
snake_case : Union[str, Any] = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
snake_case : Any = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
snake_case : List[Any] = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=UpperCAmelCase__ )
if decoder_head_mask is None:
snake_case : Tuple = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=UpperCAmelCase__ )
if cross_attn_head_mask is None:
snake_case : Union[str, Any] = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=UpperCAmelCase__ )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def lowerCAmelCase( self : int ):
"""simple docstring"""
snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
snake_case : List[str] = input_ids.clamp(self.pad_token_id + 1 )
snake_case : List[str] = decoder_input_ids.clamp(self.pad_token_id + 1 )
snake_case : str = self.get_config()
snake_case : Tuple = config.num_attention_heads
snake_case : List[Any] = self.prepare_inputs_dict(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
return config, input_dict
def lowerCAmelCase( self : Dict ):
"""simple docstring"""
snake_case , snake_case : List[str] = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCAmelCase( self : Dict ):
"""simple docstring"""
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def lowerCAmelCase( self : Tuple ):
"""simple docstring"""
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def lowerCAmelCase( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Tuple , ):
"""simple docstring"""
snake_case : str = UMTaModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
snake_case : str = model(
input_ids=UpperCAmelCase__ , decoder_input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , )
snake_case : int = model(input_ids=UpperCAmelCase__ , decoder_input_ids=UpperCAmelCase__ )
snake_case : int = result.last_hidden_state
snake_case : Dict = result.past_key_values
snake_case : Dict = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(UpperCAmelCase__ ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def lowerCAmelCase( self : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str] , ):
"""simple docstring"""
snake_case : int = UMTaModel(config=UpperCAmelCase__ ).get_decoder().to(UpperCAmelCase__ ).eval()
# first forward pass
snake_case : List[Any] = model(UpperCAmelCase__ , use_cache=UpperCAmelCase__ )
snake_case : List[Any] = model(UpperCAmelCase__ )
snake_case : Any = model(UpperCAmelCase__ , use_cache=UpperCAmelCase__ )
self.parent.assertTrue(len(UpperCAmelCase__ ) == len(UpperCAmelCase__ ) )
self.parent.assertTrue(len(UpperCAmelCase__ ) == len(UpperCAmelCase__ ) + 1 )
snake_case , snake_case : List[str] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
snake_case : Any = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
snake_case : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case : Any = model(UpperCAmelCase__ )['''last_hidden_state''']
snake_case : Optional[Any] = model(UpperCAmelCase__ , past_key_values=UpperCAmelCase__ )['''last_hidden_state''']
# select random slice
snake_case : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case : Union[str, Any] = output_from_no_past[:, -1, random_slice_idx].detach()
snake_case : Tuple = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-3 ) )
def lowerCAmelCase( self : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , ):
"""simple docstring"""
snake_case : int = UMTaModel(config=UpperCAmelCase__ ).to(UpperCAmelCase__ ).half().eval()
snake_case : str = model(**UpperCAmelCase__ )['''last_hidden_state''']
self.parent.assertFalse(torch.isnan(UpperCAmelCase__ ).any().item() )
@require_torch
# NOTE(review): the three mixin bases are all the mangled identifier `a`
# (duplicate base class -> TypeError); every class attribute below is named
# `A__` and every method `lowerCAmelCase`, so later definitions silently
# overwrite earlier ones — the original names (all_model_classes,
# pipeline_model_mapping, test flags, test methods) need restoring.
class a_ ( a , a , a , unittest.TestCase ):
    A__ : str = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    A__ : str = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    A__ : Any = (
        {
            'conversational': UMTaForConditionalGeneration,
            'feature-extraction': UMTaModel,
            'summarization': UMTaForConditionalGeneration,
            'text2text-generation': UMTaForConditionalGeneration,
            'translation': UMTaForConditionalGeneration,
            'question-answering': UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    A__ : Dict = True
    A__ : List[str] = False
    A__ : Optional[int] = False
    A__ : Optional[int] = True
    A__ : List[str] = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    A__ : int = [0.8, 0.9]
    def lowerCAmelCase( self : Optional[Any] ):
        """Set up the shared model tester.

        NOTE(review): the tester instance is bound to a throwaway local
        ``snake_case`` instead of ``self.model_tester``, which the tests
        below read.
        """
        snake_case : Union[str, Any] = UMTaModelTester(self )
    @unittest.skip('''Test has a segmentation fault on torch 1.8.0''' )
    def lowerCAmelCase( self : Optional[int] ):
        """Export a UMT5 model to ONNX into a temporary directory (skipped)."""
        snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
        # NOTE(review): reads `config_and_inputs`, but the tuple above was
        # bound to the throwaway `snake_case` local.
        snake_case : Optional[Any] = UMTaModel(config_and_inputs[0] ).to(UpperCAmelCase__ )
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                UpperCAmelCase__ , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"{tmpdirname}/t5_test.onnx" , export_params=UpperCAmelCase__ , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , )
    @unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
    def lowerCAmelCase( self : List[Any] ):
        """Run the tester's fp16 forward check on GPU."""
        snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fpaa_forward(*UpperCAmelCase__ )
    def lowerCAmelCase( self : Tuple ):
        """With fully-zeroed head masks, generated attention weights must be 0.

        NOTE(review): the dict of zero masks is bound to `snake_case` but read
        back as `head_masking`; the generate output as `out`; the attention
        names list as `attention_names` — mangled locals throughout.
        """
        snake_case : Optional[int] = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions''']
        snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
        snake_case : int = config_and_inputs[0]
        snake_case : Union[str, Any] = UMTaForConditionalGeneration(UpperCAmelCase__ ).eval()
        model.to(UpperCAmelCase__ )
        snake_case : str = {
            '''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=UpperCAmelCase__ ),
            '''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCAmelCase__ ),
            '''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCAmelCase__ ),
        }
        for attn_name, (name, mask) in zip(UpperCAmelCase__ , head_masking.items() ):
            snake_case : int = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                snake_case : List[str] = torch.ones(
                    config.num_decoder_layers , config.num_heads , device=UpperCAmelCase__ )
            snake_case : Union[str, Any] = model.generate(
                config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=UpperCAmelCase__ , return_dict_in_generate=UpperCAmelCase__ , **UpperCAmelCase__ , )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            snake_case : List[str] = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
    @unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' )
    def lowerCAmelCase( self : Any ):
        """Disabled placeholder test (edge cases on the tiny model)."""
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class a_ ( unittest.TestCase ):
@slow
@unittest.skip(
'''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' )
def lowerCAmelCase( self : int ):
"""simple docstring"""
snake_case : Optional[Any] = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=UpperCAmelCase__ ).to(UpperCAmelCase__ )
snake_case : int = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=UpperCAmelCase__ , legacy=UpperCAmelCase__ )
snake_case : List[str] = [
'''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
'''No se como puedo <extra_id_0>.''',
'''This is the reason why we <extra_id_0> them.''',
'''The <extra_id_0> walks in <extra_id_1>, seats''',
'''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
]
snake_case : Dict = tokenizer(UpperCAmelCase__ , return_tensors='''pt''' , padding=UpperCAmelCase__ ).input_ids
# fmt: off
snake_case : Optional[Any] = torch.tensor(
[
[ 38_530, 210_703, 256_299, 1_410, 256_298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 25_922, 256_299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1_460, 339, 312, 19_014, 10_620, 758, 256_299, 2_355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 256_299, 14_869, 281, 301, 256_298, 275, 119_983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 256_299, 14_869, 281, 2_234, 289, 2_275, 333,61_391, 289, 256_298, 543, 256_297, 168_714, 329, 256_296,274, 1],
] )
# fmt: on
torch.testing.assert_allclose(UpperCAmelCase__ , UpperCAmelCase__ )
snake_case : List[Any] = model.generate(input_ids.to(UpperCAmelCase__ ) )
snake_case : int = [
'''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
'''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
]
snake_case : Tuple = tokenizer.batch_decode(UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
| 84 | 0 |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_a : Optional[List[str]] = None
_a : Tuple = '<' if sys.byteorder == 'little' else '>'
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_a : int = [
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
@dataclass
class a_ :
A__ : bool = True
A__ : Optional[str] = None
# Automatically constructed
A__ : ClassVar[str] = "PIL.Image.Image"
A__ : ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
A__ : str = field(default='Image' , init=a , repr=a )
def __call__( self : int ):
"""simple docstring"""
return self.pa_type
def lowerCAmelCase( self : Optional[Any] , UpperCAmelCase__ : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ):
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
snake_case : Optional[Any] = np.array(UpperCAmelCase__ )
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
return {"path": value, "bytes": None}
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
return {"path": None, "bytes": value}
elif isinstance(UpperCAmelCase__ , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(UpperCAmelCase__ )
elif isinstance(UpperCAmelCase__ , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(UpperCAmelCase__ )
elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get('''path''' )}
elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )}
else:
raise ValueError(
F"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}." )
def lowerCAmelCase( self : Optional[Any] , UpperCAmelCase__ : dict , UpperCAmelCase__ : Any=None ):
"""simple docstring"""
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Image(decode=True) instead.''' )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support decoding images, please install \'Pillow\'.''' )
if token_per_repo_id is None:
snake_case : Tuple = {}
snake_case : Tuple = value['''path'''], value['''bytes''']
if bytes_ is None:
if path is None:
raise ValueError(F"An image should have one of 'path' or 'bytes' but both are None in {value}." )
else:
if is_local_path(UpperCAmelCase__ ):
snake_case : str = PIL.Image.open(UpperCAmelCase__ )
else:
snake_case : Dict = path.split('''::''' )[-1]
try:
snake_case : Union[str, Any] = string_to_dict(UpperCAmelCase__ , config.HUB_DATASETS_URL )['''repo_id''']
snake_case : str = token_per_repo_id.get(UpperCAmelCase__ )
except ValueError:
snake_case : str = None
with xopen(UpperCAmelCase__ , '''rb''' , use_auth_token=UpperCAmelCase__ ) as f:
snake_case : Any = BytesIO(f.read() )
snake_case : Dict = PIL.Image.open(bytes_ )
else:
snake_case : Any = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def lowerCAmelCase( self : List[Any] ):
"""simple docstring"""
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('''binary''' ),
"path": Value('''string''' ),
}
)
def lowerCAmelCase( self : List[str] , UpperCAmelCase__ : Union[pa.StringArray, pa.StructArray, pa.ListArray] ):
"""simple docstring"""
if pa.types.is_string(storage.type ):
snake_case : Any = pa.array([None] * len(UpperCAmelCase__ ) , type=pa.binary() )
snake_case : Union[str, Any] = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
snake_case : Union[str, Any] = pa.array([None] * len(UpperCAmelCase__ ) , type=pa.string() )
snake_case : Tuple = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('''bytes''' ) >= 0:
snake_case : Dict = storage.field('''bytes''' )
else:
snake_case : int = pa.array([None] * len(UpperCAmelCase__ ) , type=pa.binary() )
if storage.type.get_field_index('''path''' ) >= 0:
snake_case : Tuple = storage.field('''path''' )
else:
snake_case : Dict = pa.array([None] * len(UpperCAmelCase__ ) , type=pa.string() )
snake_case : Optional[int] = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
snake_case : Union[str, Any] = pa.array(
[encode_np_array(np.array(UpperCAmelCase__ ) )['''bytes'''] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
snake_case : Optional[int] = pa.array([None] * len(UpperCAmelCase__ ) , type=pa.string() )
snake_case : List[Any] = pa.StructArray.from_arrays(
[bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() )
return array_cast(UpperCAmelCase__ , self.pa_type )
def lowerCAmelCase( self : List[Any] , UpperCAmelCase__ : pa.StructArray ):
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(UpperCAmelCase__ : List[Any] ):
with xopen(UpperCAmelCase__ , '''rb''' ) as f:
snake_case : str = f.read()
return bytes_
snake_case : int = pa.array(
[
(path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
snake_case : List[str] = pa.array(
[os.path.basename(UpperCAmelCase__ ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] , type=pa.string() , )
snake_case : List[Any] = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() )
return array_cast(UpperCAmelCase__ , self.pa_type )
def a_ ( ) -> List[str]:
    """Return the image formats PIL can both open and save.

    The result is computed once and memoized in the module-level
    ``_IMAGE_COMPRESSION_FORMATS`` cache.

    Returns:
        The list of format names supported for both reading and writing.

    Raises:
        ImportError: if Pillow is not installed.
    """
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        # Populate PIL's OPEN/SAVE plugin registries before intersecting them.
        PIL.Image.init()
        # Bug fix: the original bound this list to a throwaway local, so the
        # cache was never filled and the function always returned None.
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
    return _IMAGE_COMPRESSION_FORMATS
def a_ ( image ) -> bytes:
    """Serialize a PIL image to compressed bytes.

    Keeps the image's own format when PIL can also write it; otherwise falls
    back to lossless PNG for common modes, or TIFF for everything else.

    Args:
        image: the PIL image to serialize (parameter renamed from the mangled
            ``__magic_name__``; the body referenced ``image`` throughout).

    Returns:
        The encoded image bytes.
    """
    buffer = BytesIO()
    # Re-use the source format only if it is round-trippable by PIL.
    if image.format in list_image_compression_formats():
        image_format = image.format
    else:
        image_format = '''PNG''' if image.mode in ['''1''', '''L''', '''LA''', '''RGB''', '''RGBA'''] else '''TIFF'''
    # Bug fix: the original bound the buffer and format to throwaway locals
    # and passed the image itself as both save() arguments.
    image.save(buffer , format=image_format )
    return buffer.getvalue()
def a_ ( image ) -> dict:
    """Encode a PIL image as an ``{"path", "bytes"}`` dict.

    If the image was loaded from a file (non-empty ``filename``), only its
    path is stored; otherwise the image is serialized to bytes.

    Args:
        image: the PIL image to encode (parameter renamed from the mangled
            ``__magic_name__``; the body already referenced ``image``).

    Returns:
        dict with exactly one of ``"path"`` / ``"bytes"`` populated.
    """
    if hasattr(image , '''filename''' ) and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image )}
def a_ ( array ) -> dict:
    """Encode a numpy array into an image ``{"path", "bytes"}`` dict.

    The array dtype is mapped onto a dtype Pillow supports (downcasting with
    a warning when needed), converted to a PIL image, and serialized.

    Args:
        array: numpy array; multi-channel arrays (ndim > 2) must have an
            integer kind and are downcast to ``|u1``.

    Returns:
        dict with ``"path"`` None and ``"bytes"`` holding the encoded image.

    Raises:
        ImportError: if Pillow is not installed.
        TypeError: if the dtype cannot be mapped to a supported image dtype.
    """
    # NOTE(review): names reconstructed — the original bound every result to a
    # throwaway `snake_case` local while reading `array`, `dtype`,
    # `dest_dtype`, etc.; parameter renamed from the mangled `__magic_name__`.
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != '''=''' else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype('''|u1''' )
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                F"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays." )
        if dtype is not dest_dtype:
            warnings.warn(F"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'" )
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize )
            dest_dtype = np.dtype(dest_dtype_str )
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(F"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'" )
                break
            else:
                # Reset before trying a smaller itemsize, so a fully failed
                # search is caught by the `dest_dtype is None` guard below.
                dest_dtype = None
                dtype_itemsize //= 2
    if dest_dtype is None:
        raise TypeError(
            F"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}" )
    image = PIL.Image.fromarray(array.astype(dest_dtype ) )
    return {"path": None, "bytes": image_to_bytes(image )}
def a_ ( objs ) -> List[dict]:
    """Encode a list of image-like objects (paths, numpy arrays, PIL images)
    into ``{"path", "bytes"}`` dicts, preserving ``None`` entries.

    Returns the input unchanged when it is empty or of an unhandled type.

    Raises:
        ImportError: if Pillow is not installed.
    """
    # NOTE(review): locals reconstructed — the original bound results to a
    # throwaway `snake_case` while reading `obj` / `obj_to_image_dict_func`,
    # and passed the mangled parameter to isinstance() for both arguments.
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
    if objs:
        # Inspect the first non-null element to decide how to encode the batch.
        _, obj = first_non_null_value(objs )
        if isinstance(obj , str ):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj , np.ndarray ):
            # NOTE(review): helper names below reconstructed from the upstream
            # library — confirm they match this module's encoder definitions.
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        elif isinstance(obj , PIL.Image.Image ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        else:
            return objs
    else:
        return objs
| 709 |
import torch
from diffusers import DiffusionPipeline
class a_ ( a ):
    """Minimal DiffusionPipeline that runs a single denoising step.

    Mirrors the diffusers "one step unet" community-pipeline example: draw
    random noise, run the UNet once, take one scheduler step, and return a
    tensor of ones with the sample's shape.
    """

    def __init__( self , unet , scheduler ):
        """Register the UNet and scheduler sub-modules.

        Bug fix: the original declared both parameters with the same mangled
        name (a SyntaxError); names restored from the register_modules call.
        """
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )

    def __call__( self ):
        """Run one UNet forward pass plus one scheduler step."""
        # Random latent with the UNet's expected (1, C, H, W) shape.
        sample = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
        timestep = 1
        # Bug fix: the original passed an undefined name to the UNet and
        # scheduler; thread the local sample/timestep through instead.
        model_output = self.unet(sample , timestep ).sample
        scheduler_output = self.scheduler.step(model_output , timestep , sample ).prev_sample
        # scheduler_output - scheduler_output keeps the graph attached to the
        # scheduler result while yielding a deterministic tensor of ones.
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output )
        return result
| 84 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class a_ :
def __init__( self : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple=13 , UpperCAmelCase__ : List[str]=7 , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : List[str]=99 , UpperCAmelCase__ : List[Any]=32 , UpperCAmelCase__ : Optional[int]=5 , UpperCAmelCase__ : Dict=4 , UpperCAmelCase__ : Any=37 , UpperCAmelCase__ : List[Any]="gelu" , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : List[str]=512 , UpperCAmelCase__ : Dict=16 , UpperCAmelCase__ : Dict=2 , UpperCAmelCase__ : int=0.02 , UpperCAmelCase__ : Optional[int]=3 , UpperCAmelCase__ : List[str]=4 , UpperCAmelCase__ : Any=None , ):
"""simple docstring"""
snake_case : Optional[Any] = parent
snake_case : Dict = batch_size
snake_case : List[str] = seq_length
snake_case : Dict = is_training
snake_case : List[Any] = use_token_type_ids
snake_case : List[str] = use_labels
snake_case : Union[str, Any] = vocab_size
snake_case : Optional[int] = hidden_size
snake_case : Any = num_hidden_layers
snake_case : Dict = num_attention_heads
snake_case : Tuple = intermediate_size
snake_case : Dict = hidden_act
snake_case : List[str] = hidden_dropout_prob
snake_case : List[Any] = attention_probs_dropout_prob
snake_case : Dict = max_position_embeddings
snake_case : List[str] = type_vocab_size
snake_case : Any = type_sequence_label_size
snake_case : Dict = initializer_range
snake_case : List[str] = num_labels
snake_case : Union[str, Any] = num_choices
snake_case : List[str] = scope
snake_case : Optional[Any] = self.vocab_size - 1
def lowerCAmelCase( self : Optional[Any] ):
"""simple docstring"""
snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case : List[Any] = None
if self.use_token_type_ids:
snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case : Optional[Any] = None
snake_case : Tuple = None
snake_case : Optional[Any] = None
if self.use_labels:
snake_case : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
snake_case : Tuple = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
snake_case : List[Any] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCAmelCase( self : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , *UpperCAmelCase__ : str ):
"""simple docstring"""
snake_case : str = OpenAIGPTModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
snake_case : Optional[Any] = model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , head_mask=UpperCAmelCase__ )
snake_case : int = model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
snake_case : Optional[Any] = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase( self : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict , *UpperCAmelCase__ : Tuple ):
"""simple docstring"""
snake_case : List[str] = OpenAIGPTLMHeadModel(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
snake_case : Any = model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase( self : List[str] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int , *UpperCAmelCase__ : Optional[Any] ):
"""simple docstring"""
snake_case : str = OpenAIGPTDoubleHeadsModel(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
snake_case : int = model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase( self : Optional[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any] , *UpperCAmelCase__ : Optional[int] ):
"""simple docstring"""
snake_case : Dict = self.num_labels
snake_case : List[str] = OpenAIGPTForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case : str = model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase( self : List[str] ):
"""simple docstring"""
snake_case : int = self.prepare_config_and_inputs()
(
snake_case
) : List[Any] = config_and_inputs
snake_case : Tuple = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_torch
class a_ ( a , a , a , unittest.TestCase ):
    """Common test-suite entry for OpenAI GPT: runs the shared model/generation/pipeline
    mixin tests plus the model-specific checks implemented on OpenAIGPTModelTester.

    NOTE(review): the base classes are all aliased to `a` here — presumably
    ModelTesterMixin, GenerationTesterMixin and PipelineTesterMixin; confirm
    against the module imports.
    """

    # Names below are read by the mixins and must not be changed.
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        """Skip pipeline tests that are known not to work for this architecture."""
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Extend the common input preparation with the extra label tensors the
        double-heads model expects (multiple-choice shaped inputs)."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class a_ ( unittest.TestCase ):
    """Slow integration test: greedy generation from the pretrained `openai-gpt` checkpoint."""

    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)  # torch_device comes from transformers.testing_utils (module-level import)
        input_ids = torch.tensor([[481, 4_735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4_735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40_477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the
        # Greedy decoding (do_sample=False) so the continuation is deterministic.
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 710 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class a_ ( a ):
    """Processor that wraps a CLIP image processor and a CLIP tokenizer into a single object.

    Text is routed to the tokenizer, images to the image processor; the decode
    helpers are simply forwarded to the tokenizer.
    """

    # Names required by ProcessorMixin to locate and (de)serialize the two components.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        """Store the two components; accepts the deprecated `feature_extractor` kwarg as an alias."""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or preprocess `images`.

        Returns the tokenizer encoding (with `pixel_values` merged in when both
        modalities are given), or a BatchEncoding of image features only.
        Raises ValueError when neither input is provided.
        """
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Union of tokenizer and image-processor input names, de-duplicated, order preserved."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        """Deprecated alias for `image_processor_class`."""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        """Deprecated alias for `image_processor`."""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 84 | 0 |
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq/unilm WavLM parameter name (key) -> HF transformers parameter name (value).
# A "*" in the value is replaced with the encoder-layer index during conversion.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
    "self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
    "self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "ctc_proj",
    "mask_emb": "masked_spec_embed",
}

# HF parameter names that live directly on the converted model rather than
# behind a `weight`/`bias` attribute of a submodule.
TOP_LEVEL_KEYS = [
    "ctc_proj",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy one fairseq tensor into the HF module reached by a dotted path.

    Args:
        hf_pointer: root HF module to walk into.
        key: dotted attribute path, e.g. ``"encoder.layers.3.attention.k_proj"``.
        value: tensor taken from the fairseq checkpoint.
        full_name: original fairseq parameter name (used only in messages).
        weight_type: one of ``"weight"``, ``"weight_g"``, ``"weight_v"``,
            ``"bias"`` or ``None`` (``None`` means ``hf_pointer`` itself is
            the parameter to overwrite).
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    # Sanity-check shapes before overwriting anything.
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    """Copy every parameter of the fairseq WavLM model into the HF model.

    Conv feature-extractor weights are delegated to `load_conv_layer`; all
    other weights are matched against MAPPING and written via `set_recursively`.
    Unmatched fairseq parameters are collected and reported in a warning.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # Text before `key` ends with ".<layer_index>." — recover the index.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq conv-feature-extractor parameter into the HF feature extractor.

    `full_name` looks like ``...conv_layers.<layer_id>.<type_id>.{weight,bias}``:
    type 0 is the convolution itself, type 2 the (group/layer) norm. Anything
    else is recorded in `unused_weights`.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    """Convert a fairseq WavLM checkpoint to a HF `WavLMModel` and save it.

    Args:
        checkpoint_path: path to the fairseq ``.pt`` checkpoint.
        pytorch_dump_folder_path: output directory for the converted model.
        config_path: optional path to an HF ``config.json``; a default
            `WavLMConfig` is used when not given.
    """
    # Load the pre-trained fairseq/unilm model.
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: parse paths and run the conversion.
    parser = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 711 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_a : Optional[Any] = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
_a : str = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
_a : List[Any] = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. 
Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
    """`datasets.Metric` wrapper around the official `mauve-text` implementation."""

    def _info(self):
        """Declare metric metadata and the expected feature schema (two string sequences)."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/krishnap25/mauve",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/krishnap25/mauve"],
            reference_urls=[
                "https://arxiv.org/abs/2102.01454",
                "https://github.com/krishnap25/mauve",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1_024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        """Compute MAUVE between `predictions` (P) and `references` (Q).

        Returns the object produced by `mauve.compute_mauve` (fields include
        `mauve`, `frontier_integral`, `divergence_curve`, `p_hist`, `q_hist`).
        """
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
| 84 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_mbart import MBartTokenizer
else:
    # Sentencepiece unavailable: expose a None placeholder so the fast
    # tokenizer's `slow_tokenizer_class` reference still resolves.
    MBartTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/mbart-large-en-ro': (
            'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
        ),
        'facebook/mbart-large-cc25': (
            'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
        ),
    },
    'tokenizer_file': {
        'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
        'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/mbart-large-en-ro': 1_024,
    'facebook/mbart-large-cc25': 1_024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
# fmt: on
class a_ ( a ):
    """Fast mBART tokenizer (backed by HuggingFace `tokenizers`).

    Adds language-code handling on top of the generic fast tokenizer: source
    text is post-processed as ``[tokens] </s> <src_lang_code>`` and target
    text as ``[tokens] </s> <tgt_lang_code>``.
    """

    # Names read by the PreTrainedTokenizerFast machinery — do not rename.
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # The mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({'additional_special_tokens': _additional_special_tokens})

        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else 'en_XX'
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        """Current source language code (e.g. ``"en_XX"``)."""
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """Wrap token ids with the current prefix/suffix special tokens."""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """mBART does not use token type ids: return a zero list of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors: Optional[str], src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by translation pipelines: tokenize and attach the forced target-language BOS id."""
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        """Set the language pair, then defer to the generic seq2seq batching."""
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Source mode: no prefix, suffix = [eos, src_lang_code]; rebuild the post-processor."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str,
            pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Target mode: no prefix, suffix = [eos, tgt_lang_code]; rebuild the post-processor."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str,
            pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece model next to the saved tokenizer files."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.'
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 712 |
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name):
    """Translate a single MAE checkpoint parameter name to its HF ViTMAE equivalent.

    The order of the substitutions matters: decoder-specific prefixes are
    handled before the generic ones, and the bare ``norm.*`` keys are only
    remapped when they do not belong to the decoder.
    """
    if "cls_token" in name:
        name = name.replace('cls_token', 'vit.embeddings.cls_token')
    if "mask_token" in name:
        name = name.replace('mask_token', 'decoder.mask_token')
    if "decoder_pos_embed" in name:
        name = name.replace('decoder_pos_embed', 'decoder.decoder_pos_embed')
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace('pos_embed', 'vit.embeddings.position_embeddings')
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'vit.embeddings.patch_embeddings.projection')
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm', 'vit.embeddings.norm')
    if "decoder_blocks" in name:
        name = name.replace('decoder_blocks', 'decoder.decoder_layers')
    if "blocks" in name:
        name = name.replace('blocks', 'vit.encoder.layer')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "attn" in name:
        name = name.replace('attn', 'attention.self')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if "decoder_embed" in name:
        name = name.replace('decoder_embed', 'decoder.decoder_embed')
    if "decoder_norm" in name:
        name = name.replace('decoder_norm', 'decoder.decoder_norm')
    if "decoder_pred" in name:
        name = name.replace('decoder_pred', 'decoder.decoder_pred')
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace('norm.weight', 'vit.layernorm.weight')
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace('norm.bias', 'vit.layernorm.bias')

    return name
def a_(orig_state_dict, config) -> dict:
    """Split fused qkv tensors in a ViT-MAE checkpoint into separate q/k/v entries.

    Args:
        orig_state_dict: checkpoint state dict (keys already renamed to the
            transformers naming scheme).
        config: model config providing ``hidden_size`` and ``decoder_hidden_size``.

    Returns:
        The converted state dict (mutated in place and returned).
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            # Decoder layers use the (possibly smaller) decoder hidden size.
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
            # The fused tensor is stacked as [query; key; value] along dim 0.
            if "weight" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            # Non-qkv keys are kept unchanged (the original code dropped them).
            orig_state_dict[key] = val
    return orig_state_dict
def a_(checkpoint_url, pytorch_dump_folder_path) -> None:
    """Download an original ViT-MAE checkpoint, convert it to the transformers
    format, sanity-check its logits on a reference image, and save the model
    and image processor.

    Args:
        checkpoint_url: URL of the original MAE ``.pth`` checkpoint.
        pytorch_dump_folder_path: output directory for the converted model.
    """
    config = ViTMAEConfig()
    # Scale up the default (base) config for the large/huge variants.
    if "large" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1_280
        config.intermediate_size = 5_120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
    model = ViTMAEForPreTraining(config)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')['model']
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    new_state_dict = convert_state_dict(state_dict, config)
    model.load_state_dict(new_state_dict)
    model.eval()
    url = 'https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg'
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors='pt')
    # forward pass (seeded: ViT-MAE masks patches randomly)
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits
    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]])
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]])
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]])
    # verify logits against the reference values for this variant
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: download a ViT-MAE checkpoint and convert it.
    # NOTE(review): the parser/args bindings look garbled — results are
    # assigned to `_a` but referenced as `parser`/`args` below; verify.
    _a : Tuple = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--checkpoint_url',
        default='https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth',
        type=str,
        help='URL of the checkpoint you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    _a : str = parser.parse_args()
    convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 84 | 0 |
def a_(num) -> int:
    """Return the multiplicative persistence of ``num``: how many times its
    digits must be multiplied together until a single digit remains.

    Raises:
        ValueError: if ``num`` is not an int or is negative.
    """
    if not isinstance(num, int):
        raise ValueError('multiplicative_persistence() only accepts integral values')
    if num < 0:
        raise ValueError('multiplicative_persistence() does not accept negative values')
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        # Bug fix: convert each digit individually (the previous code converted
        # the whole input on every digit, so the loop diverged).
        total = 1
        for digit in num_string:
            total *= int(digit)
        num_string = str(total)
        steps += 1
    return steps
def a_(num) -> int:
    """Return the additive persistence of ``num``: how many times its digits
    must be summed until a single digit remains.

    Raises:
        ValueError: if ``num`` is not an int or is negative.
    """
    if not isinstance(num, int):
        raise ValueError('additive_persistence() only accepts integral values')
    if num < 0:
        raise ValueError('additive_persistence() does not accept negative values')
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        # Bug fix: sum the individual digits (the previous code converted the
        # whole input on every digit, so the loop never terminated).
        num_string = str(sum(int(digit) for digit in num_string))
        steps += 1
    return steps
if __name__ == "__main__":
    # Run any doctests embedded in this module.
    import doctest
    doctest.testmod()
| 713 |
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# Batch-size constants for the example (presumably train/eval micro-batch
# sizes — TODO confirm; the garbled names no longer say which is which).
_a : Optional[Any] = 16
_a : Union[str, Any] = 32
def a_(accelerator, batch_size=16) -> tuple:
    """Build train/eval dataloaders for GLUE MRPC tokenized with bert-base-cased.

    Args:
        accelerator: the ``Accelerator`` whose distributed/mixed-precision
            settings drive the padding strategy.
        batch_size: per-device batch size for both splits.

    Returns:
        (train_dataloader, eval_dataloader)
    """
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding='longest', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='pt', )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
    # Swap in a lightweight mock dataloader builder so CI avoids downloads.
    from accelerate.test_utils.training import mocked_dataloaders
    _a : Optional[int] = mocked_dataloaders # noqa: F811
def a_(config, args) -> None:
    """Train bert-base-cased on GLUE MRPC with an OOM-safe inner training loop.

    The inner loop is decorated with ``find_executable_batch_size`` so that on
    a CUDA out-of-memory error everything is rebuilt with a smaller batch size
    until training fits in memory.

    Args:
        config: dict with 'lr', 'num_epochs', 'seed' and 'batch_size'.
        args: parsed CLI namespace providing ``cpu`` and ``mixed_precision``.
    """
    # For testing only: shrink the run when mocked dataloaders are in use.
    if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
        config['num_epochs'] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    metric = evaluate.load('glue', 'mrpc')

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()
        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs), )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
                metric.add_batch(
                    predictions=predictions, references=references, )
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def a_() -> None:
    """Parse CLI arguments and launch training with default hyper-parameters."""
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision', type=str, default=None, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.', )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    args = parser.parse_args()
    # Default hyper-parameters for the MRPC fine-tuning run.
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
    # Script entry point.
    # NOTE(review): 'main' is defined above under the garbled name 'a_' — verify.
    main()
| 84 | 0 |
from collections.abc import Callable
def a_(function, a, b) -> float:
    """Find a root of ``function`` on the interval [a, b] by bisection.

    Requires that ``function`` changes sign over the interval, or that one of
    the endpoints is already a root.

    Args:
        function: a continuous real-valued function of one variable.
        a: left endpoint of the search interval.
        b: right endpoint of the search interval.

    Returns:
        An approximation of the root (interval halved until width <= 1e-7).

    Raises:
        ValueError: if ``function`` has the same nonzero sign at both endpoints.
    """
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError('could not find root in given interval.')
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid
def a_(x) -> float:
    """Example cubic f(x) = x**3 - 2*x - 5 (root near x ~ 2.0946).

    Bug fix: the body referenced ``x`` while the parameter was named
    differently, raising NameError on every call.
    """
    return x**3 - 2 * x - 5
if __name__ == "__main__":
    # Print the root of f on [1, 1000], then run the module doctests.
    # NOTE(review): 'bisection' and 'f' are defined above under the garbled
    # name 'a_' — verify these references.
    print(bisection(f, 1, 1_000))
    import doctest
    doctest.testmod()
| 714 |
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
_a : Dict = logging.get_logger(__name__)
def a_(box, width, height) -> list:
    """Scale an absolute (x0, y0, x1, y1) pixel box to the 0-1000 coordinate
    system used by LayoutLM-style models.

    Args:
        box: bounding box as (left, top, right, bottom) in pixels.
        width: image width in pixels.
        height: image height in pixels.
    """
    return [
        int(1_000 * (box[0] / width)),
        int(1_000 * (box[1] / height)),
        int(1_000 * (box[2] / width)),
        int(1_000 * (box[3] / height)),
    ]
def a_(image, lang, tesseract_config=None) -> tuple:
    """Run Tesseract OCR on ``image`` and return recognized words with boxes
    normalized to the 0-1000 LayoutLM coordinate system.

    Args:
        image: input image (anything ``to_pil_image`` accepts).
        lang: Tesseract language code, or None.
        tesseract_config: extra CLI flags forwarded to Tesseract.

    Returns:
        (words, normalized_boxes), two lists of equal length.
    """
    tesseract_config = tesseract_config if tesseract_config is not None else ''
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type='dict', config=tesseract_config)
    words, left, top, width, height = data['text'], data['left'], data['top'], data['width'], data['height']
    # filter empty words and corresponding coordinates (a set makes the
    # membership tests below O(1) instead of O(n))
    irrelevant_indices = {idx for idx, word in enumerate(words) if not word.strip()}
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_boxes.append([x, y, x + w, y + h])
    # finally, normalize the bounding boxes
    normalized_boxes = [normalize_box(box, image_width, image_height) for box in actual_boxes]
    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class a_ ( a ):
    """Image processor for LayoutLM-style models: resizes input images and
    optionally runs Tesseract OCR to extract words and normalized boxes.

    NOTE(review): the local names below (``snake_case``) look garbled by an
    automated rewrite — e.g. ``__init__`` appears not to bind any instance
    attributes, and ``preprocess`` reads names that are never assigned.
    Verify against the original source before relying on this class.
    """

    A__ : int = ['pixel_values']
    def __init__( self : Optional[int] , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional[str] = "" , **UpperCAmelCase__ : int , ):
        """Store resize/OCR configuration; size defaults to 224x224."""
        super().__init__(**UpperCAmelCase__ )
        snake_case : Any = size if size is not None else {'''height''': 224, '''width''': 224}
        snake_case : Tuple = get_size_dict(UpperCAmelCase__ )
        snake_case : Dict = do_resize
        snake_case : str = size
        snake_case : Optional[int] = resample
        snake_case : Union[str, Any] = apply_ocr
        snake_case : int = ocr_lang
        snake_case : Union[str, Any] = tesseract_config
    def lowerCAmelCase( self : List[str] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Dict[str, int] , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Any , ):
        """Resize an image to ``size`` (dict with 'height' and 'width' keys)."""
        snake_case : Dict = get_size_dict(UpperCAmelCase__ )
        if "height" not in size or "width" not in size:
            raise ValueError(F"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}" )
        snake_case : Tuple = (size['''height'''], size['''width'''])
        return resize(UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
    def lowerCAmelCase( self : Tuple , UpperCAmelCase__ : ImageInput , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : PILImageResampling = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase__ : List[Any] , ):
        """Preprocess a batch of images: optional OCR, resize, RGB->BGR flip,
        channel-format conversion, and packaging into a BatchFeature."""
        snake_case : Tuple = do_resize if do_resize is not None else self.do_resize
        snake_case : List[Any] = size if size is not None else self.size
        snake_case : Tuple = get_size_dict(UpperCAmelCase__ )
        snake_case : str = resample if resample is not None else self.resample
        snake_case : Optional[int] = apply_ocr if apply_ocr is not None else self.apply_ocr
        snake_case : Any = ocr_lang if ocr_lang is not None else self.ocr_lang
        snake_case : Optional[int] = tesseract_config if tesseract_config is not None else self.tesseract_config
        snake_case : List[str] = make_list_of_images(UpperCAmelCase__ )
        if not valid_images(UpperCAmelCase__ ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )
        # All transformations expect numpy arrays.
        snake_case : Any = [to_numpy_array(UpperCAmelCase__ ) for image in images]
        if apply_ocr:
            requires_backends(self , '''pytesseract''' )
            snake_case : Optional[int] = []
            snake_case : Union[str, Any] = []
            for image in images:
                snake_case , snake_case : List[Any] = apply_tesseract(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
                words_batch.append(UpperCAmelCase__ )
                boxes_batch.append(UpperCAmelCase__ )
        if do_resize:
            snake_case : Any = [self.resize(image=UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ ) for image in images]
        # flip color channels from RGB to BGR (as Detectron2 requires this)
        snake_case : int = [flip_channel_order(UpperCAmelCase__ ) for image in images]
        snake_case : Optional[Any] = [to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__ ) for image in images]
        snake_case : List[Any] = BatchFeature(data={'''pixel_values''': images} , tensor_type=UpperCAmelCase__ )
        if apply_ocr:
            snake_case : Dict = words_batch
            snake_case : Dict = boxes_batch
        return data
| 84 | 0 |
'''simple docstring'''
import argparse
import datetime
def a_(date_input) -> str:
    """Return the day of the week for a date via Zeller's congruence.

    Args:
        date_input: a date string in ``mm-dd-yyyy`` or ``mm/dd/yyyy`` format.

    Returns:
        A sentence naming the weekday, e.g. "Your date 01-31-2010, is a Sunday!".

    Raises:
        ValueError: on malformed input or out-of-range components.
        AssertionError: if the computed day disagrees with ``datetime``.
    """
    days = {
        '0': 'Sunday',
        '1': 'Monday',
        '2': 'Tuesday',
        '3': 'Wednesday',
        '4': 'Thursday',
        '5': 'Friday',
        '6': 'Saturday',
    }
    # Map datetime.weekday() (Mon=0..Sun=6) onto Zeller's indexing (Sun=0..Sat=6).
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError('Must be 10 characters long')
    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError('Month must be between 1 - 12')
    sep_a = date_input[2]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError('Date must be between 1 - 31')
    # Get second separator
    sep_a = date_input[5]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8_500:
        raise ValueError(
            'Year out of range. There has to be some sort of limit...right?')
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))
    # Start math: January/February count as months 13/14 of the previous year.
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math
    # Validate math against the standard library's calendar.
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError('The date was evaluated incorrectly. Contact developer.')
    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
if __name__ == "__main__":
    # Run doctests, then answer for the date given on the command line.
    # NOTE(review): the parser/args bindings look garbled — results are
    # assigned to `_a` but referenced as `parser`/`args`; 'zeller' is defined
    # above under the garbled name 'a_'. Verify.
    import doctest
    doctest.testmod()
    _a : Tuple = argparse.ArgumentParser(
        description=(
            'Find out what day of the week nearly any date is or was. Enter '
            'date as a string in the mm-dd-yyyy or mm/dd/yyyy format'
        )
    )
    parser.add_argument(
        'date_input', type=str, help='Date as a string (mm-dd-yyyy or mm/dd/yyyy)'
    )
    _a : List[Any] = parser.parse_args()
    zeller(args.date_input)
| 715 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class a_ :
    """Helper that builds small ASTConfig instances and random inputs for the
    AST (Audio Spectrogram Transformer) unit tests.

    Method/parameter names were restored from the call sites visible in the
    test class below (prepare_config_and_inputs, create_and_check_model, ...).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        """Return (config, input_values, labels) with random tensors."""
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, input_values, labels

    def get_config(self):
        """Build an ASTConfig from the tester's hyper-parameters."""
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )

    def create_and_check_model(self, config, input_values, labels):
        """Run a forward pass and verify the hidden-state shape."""
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common tests expect."""
        config, input_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {'input_values': input_values}
        return config, inputs_dict
@require_torch
class a_ ( a , a , unittest.TestCase ):
    """Common model tests for AST.

    NOTE(review): all test methods below carry the garbled name
    ``lowerCAmelCase`` (later defs shadow earlier ones, and unittest only
    discovers ``test_*`` names) — the original names need restoring.
    """

    # Model classes exercised by the common tests (empty when torch is absent).
    A__ : List[Any] = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-task to model-class mapping for the pipeline tests.
    A__ : int = (
        {'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel}
        if is_torch_available()
        else {}
    )
    A__ : Optional[Any] = False
    A__ : Dict = False
    A__ : int = False
    A__ : Optional[int] = False
    def lowerCAmelCase( self : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int ):
        """Skip only the audio-classification pipeline test case."""
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False
    def lowerCAmelCase( self : Optional[Any] ):
        """Create the model tester and a ConfigTester (hidden_size=37)."""
        snake_case : Optional[int] = ASTModelTester(self )
        snake_case : Optional[int] = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37 )
    def lowerCAmelCase( self : List[str] ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()
    @unittest.skip(reason='''AST does not use inputs_embeds''' )
    def lowerCAmelCase( self : Tuple ):
        """Intentionally skipped: AST has no inputs_embeds."""
        pass
    def lowerCAmelCase( self : Dict ):
        """Check input embeddings are an nn.Module and output embeddings are None/Linear."""
        snake_case , snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case : Optional[Any] = model_class(UpperCAmelCase__ )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            snake_case : Any = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(UpperCAmelCase__ , nn.Linear ) )
    def lowerCAmelCase( self : Dict ):
        """Check the forward signature starts with 'input_values'."""
        snake_case , snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case : Any = model_class(UpperCAmelCase__ )
            snake_case : Any = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            snake_case : str = [*signature.parameters.keys()]
            snake_case : List[str] = ['''input_values''']
            self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )
    def lowerCAmelCase( self : Dict ):
        """Run the base-model forward-pass shape check."""
        snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCAmelCase__ )
    @slow
    def lowerCAmelCase( self : List[str] ):
        """Smoke-test loading the first pretrained AST checkpoint."""
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            snake_case : List[str] = ASTModel.from_pretrained(UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
def a_() -> tuple:
    """Download the sample FLAC clip used by the integration test and load it.

    Returns:
        (audio, sampling_rate) as returned by ``torchaudio.load``.
    """
    filepath = hf_hub_download(
        repo_id='nielsr/audio-spectogram-transformer-checkpoint', filename='sample_audio.flac', repo_type='dataset')
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate
@require_torch
@require_torchaudio
class a_ ( unittest.TestCase ):
    """Slow integration test: verify AST audio-classification logits on a
    reference clip against known expected values."""

    @cached_property
    def lowerCAmelCase( self : Any ):
        """Pretrained ASTFeatureExtractor, or None when torchaudio is missing."""
        return (
            ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' )
            if is_torchaudio_available()
            else None
        )
    @slow
    def lowerCAmelCase( self : Tuple ):
        """Run the pretrained classifier on the sample clip and check logits."""
        snake_case : List[str] = self.default_feature_extractor
        snake_case : str = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(UpperCAmelCase__ )
        snake_case : str = self.default_feature_extractor
        snake_case , snake_case : int = prepare_audio()
        snake_case : Optional[int] = audio.squeeze().numpy()
        snake_case : Optional[Any] = feature_extractor(UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , return_tensors='''pt''' ).to(UpperCAmelCase__ )
        # forward pass
        with torch.no_grad():
            snake_case : Union[str, Any] = model(**UpperCAmelCase__ )
        # verify the logits: AudioSet has 527 classes
        snake_case : Any = torch.Size((1, 527) )
        self.assertEqual(outputs.logits.shape , UpperCAmelCase__ )
        snake_case : str = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(UpperCAmelCase__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4 ) )
| 84 | 0 |
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def a_ ( gpta_checkpoint_path , gpta_config_file , pytorch_dump_folder_path ):
    """Convert a TensorFlow GPT-2 checkpoint into a PyTorch model directory.

    gpta_checkpoint_path: path to the TF checkpoint.
    gpta_config_file: optional model-config JSON ("" selects library defaults).
    pytorch_dump_folder_path: output directory for the weights/config files.

    BUG FIX: all three parameters were named `__magic_name__`, which is a
    SyntaxError, and the f-strings below read `pytorch_weights_dump_path` /
    `pytorch_config_dump_path`, which were never bound.  The bogus `-> Tuple`
    annotation referenced an unimported name and is dropped.
    """
    # Construct model from the given config (or library defaults)
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file )
    model = GPTaModel(config )

    # Load weights from numpy
    load_tf_weights_in_gpta(model , config , gpta_checkpoint_path )

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
    print(F"Save PyTorch model to {pytorch_weights_dump_path}" )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F"Save configuration file to {pytorch_config_dump_path}" )
    with open(pytorch_config_dump_path , '''w''' , encoding='''utf-8''' ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    # BUG FIX: the parser and the parsed namespace were bound to `_a` while
    # the code below reads `parser` / `args`; the final call referenced an
    # undefined function name and argparse attributes that do not exist
    # (flags `--gpt2_*` produce `args.gpt2_*`, not `args.gpta_*`).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    parser.add_argument(
        '--gpt2_config_file',
        default='',
        type=str,
        help=(
            'An optional config json file corresponding to the pre-trained OpenAI model. \n'
            'This specifies the model architecture.'
        ),
    )
    args = parser.parse_args()
    # The conversion routine above is named `a_` in this file.
    a_(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
| 716 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_a : Union[str, Any] = logging.getLogger(__name__)
def a_ ( preds , labels ) -> Optional[int]:
    """Fraction of predictions equal to their labels (element-wise mean of preds == labels)."""
    # BUG FIX: both parameters were named `__magic_name__` — a SyntaxError —
    # while the body read `preds` and `labels`.
    return (preds == labels).mean()
@dataclass
class a_ :
    """Model/config/tokenizer selection arguments (consumed by the training `main` below).

    BUG FIX: every field was named `A__` (so only the last survived as a
    dataclass field) and the optional defaults were the undefined name `a`;
    field names are restored to the attributes `main` actually reads
    (model_name_or_path, config_name, tokenizer_name, cache_dir), with the
    conventional `None` defaults.
    """

    # Required: path or Hub id of the pretrained model.
    model_name_or_path : str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    config_name : Optional[str] = field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name : Optional[str] = field(
        default=None , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir : Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class a_ :
    """Task/data arguments (consumed by the training `main` below).

    BUG FIX: the fields were all named `A__` and `overwrite_cache` defaulted
    to the undefined name `a`; field names are restored to the attributes
    `main` reads (task_name, data_dir, max_seq_length, overwrite_cache) and
    the boolean default to `False`.
    """

    task_name : str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys() )} )
    data_dir : str = field(metadata={'help': 'Should contain the data files for the task.'} )
    max_seq_length : int = field(
        default=128 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    overwrite_cache : bool = field(
        default=False , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def a_ ( ) -> Dict:
    """Command-line entry point: fine-tune and/or evaluate a multiple-choice model.

    Returns the dict of evaluation metrics (empty when --do_eval is not set).

    BUG FIX: the obfuscated `snake_case` targets and `__magic_name__`
    placeholders left every later use (`parser`, `model_args`, `processor`,
    `config`, `tokenizer`, ...) unbound, and the annotated tuple-unpack on the
    parse line was a SyntaxError.  Locals are renamed to the names the body
    reads.  `training_args.fpaa` is replaced by the real TrainingArguments
    attribute `fp16`.
    """
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            ''' --overwrite_output_dir to overcome.''' )

    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''' , training_args )

    # Set seed
    set_seed(training_args.seed )

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list )
    except KeyError:
        raise ValueError('''Task not found: %s''' % (data_args.task_name) )

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p ) -> Dict:
        # `main` itself shadows the module-level accuracy helper (both were
        # renamed `a_`), so compute the accuracy inline — same arithmetic.
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": (preds == p.label_ids).mean()}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir )

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , '''eval_results.txt''' )
        if trainer.is_world_master():
            with open(output_eval_file , '''w''' ) as writer:
                logger.info('''***** Eval results *****''' )
                for key, value in result.items():
                    logger.info(''' %s = %s''' , key , value )
                    writer.write('''%s = %s\n''' % (key, value) )
                results.update(result )

    return results
def _mp_fn ( __magic_name__ ):
    """xla_spawn (TPU) entry point: delegate to the training routine above.

    BUG FIX: this wrapper was also named `a_`, clobbering the `a_` defined
    above (the training routine, originally `main`), and both call sites
    invoked the undefined name `main()`.  The wrapper is given its
    conventional xla_spawn name and both call sites target `a_`.  The bogus
    `-> List[Any]` annotation referenced unimported names and is dropped.
    """
    a_()


if __name__ == "__main__":
    a_()
| 84 | 0 |
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_a : List[str] = logging.get_logger(__name__)
class a_ ( PreTrainedTokenizer ):
    """Byte-level (ByT5-style) tokenizer: each UTF-8 byte is one token, with a
    small table of special tokens (pad/eos/unk plus `extra_ids` sentinel
    `<extra_id_*>` tokens) mapped above the 256 byte ids.

    BUG FIX (whole class): the obfuscated original gave every method the same
    name (`lowerCAmelCase`, so later definitions clobbered earlier ones),
    repeated the parameter name `UpperCAmelCase__` within single signatures
    and keyword lists (SyntaxErrors), and dropped the `self.` prefix on
    attribute writes.  Names are restored to the PreTrainedTokenizer API the
    bodies clearly implement; the undefined base class `a` is replaced with
    the `PreTrainedTokenizer` imported at the top of the file.  Runtime
    strings are unchanged.
    """

    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , extra_ids=125 , additional_special_tokens=None , **kwargs , ):
        """Build the byte vocabulary plus pad/eos/unk and the sentinel tokens."""
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [F"<extra_id_{i}>" for i in range(extra_ids )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda t: bool('''extra_id''' in str(t ) ) , additional_special_tokens ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    F"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    ''' provided to ByT5Tokenizer. In this case the additional_special_tokens must include the'''
                    ''' extra_ids tokens''' )
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        super().__init__(
            eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , extra_ids=extra_ids , additional_special_tokens=additional_special_tokens , **kwargs , )
        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits
        # define special tokens dict
        self.special_tokens_encoder : Dict[str, int] = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder )
        n = len(additional_special_tokens )
        for i, token in enumerate(additional_special_tokens ):
            # Sentinels occupy the top of the id space.
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder : Dict[int, str] = {v: k for k, v in self.special_tokens_encoder.items()}

    @property
    def vocab_size( self ):
        """Total vocabulary: 256 byte tokens + special tokens + sentinel ids."""
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ):
        """Return a mask with 1 for special tokens (the appended EOS), 0 otherwise."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0 )) + [1]
        return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]

    def _add_eos_if_not_present( self , token_ids: List[int] ):
        """Append EOS unless the sequence already ends with it (warn in that case)."""
        if len(token_ids ) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                F"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                ''' eos tokens being added.''' )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        """ByT5 does not use token type ids: all zeros over the full (EOS-extended) length."""
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos ) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos ) * [0]

    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        """Append EOS to each sequence and concatenate a pair when given."""
        token_ids_0 = self._add_eos_if_not_present(token_ids_0 )
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1 )
            return token_ids_0 + token_ids_1

    def _tokenize( self , text: str ):
        """One token per UTF-8 byte, each represented as chr(byte)."""
        return [chr(i ) for i in text.encode('''utf-8''' )]

    def _convert_token_to_id( self , token ):
        """Map a token to its id: special table, added tokens, else byte + offset."""
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token ) != 1:
            # Multi-character strings cannot be single bytes.
            token_id = self.unk_token_id
        else:
            token_id = ord(token ) + self._num_special_tokens
        return token_id

    def _convert_id_to_token( self , index ):
        """Inverse of _convert_token_to_id for ids in the byte/special range."""
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens )
        return token

    def convert_tokens_to_string( self , tokens ):
        """Reassemble byte tokens into a UTF-8 string; special tokens are kept verbatim."""
        bstring = b''''''
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode('''utf-8''' )
            elif token in self.added_tokens_decoder:
                # BUG FIX: the original indexed `special_tokens_decoder` here,
                # which cannot contain a key taken from `added_tokens_decoder`.
                tok_string = self.added_tokens_decoder[token].encode('''utf-8''' )
            elif token in self.special_tokens_encoder:
                tok_string = token.encode('''utf-8''' )
            elif token in self.added_tokens_encoder:
                tok_string = token.encode('''utf-8''' )
            else:
                tok_string = bytes([ord(token )] )
            bstring += tok_string
        return bstring.decode('''utf-8''' , errors='''ignore''' )

    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        """The byte vocabulary is implicit: nothing to write, return an empty tuple."""
        return ()
| 717 |
import re
def a_ ( __magic_name__ ) -> bool:
"""simple docstring"""
snake_case : List[str] = re.compile(
R'''^(?:0|94|\+94|0{2}94)''' R'''7(0|1|2|4|5|6|7|8)''' R'''(-| |)''' R'''\d{7}$''' )
return bool(re.search(__magic_name__ , __magic_name__ ) )
if __name__ == "__main__":
    _a : Any = '0094702343221'
    # BUG FIX: the original printed `is_sri_lankan_phone_number(phone)`, but
    # neither name exists in this file — the validator above is `a_` and the
    # sample number is bound to `_a`.
    print(a_(_a))
| 84 | 0 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
_a : Optional[Any] = sys.version_info >= (3, 10)
# BUG FIX: the guards below read `is_python_no_less_than_3_10`, which was
# renamed to `_a`; keep both names bound so the module imports cleanly.
is_python_no_less_than_3_10 = _a
def a_ ( __magic_name__=None , __magic_name__=None ) -> Dict:
"""simple docstring"""
return field(default_factory=lambda: default , metadata=__magic_name__ )
@dataclass
class a_ :
    """Parser fixture: one field of each basic scalar type (originally foo/bar/baz/flag).

    NOTE(review): all four fields share the name `A__`, so only the last
    annotation survives as a dataclass field — the original field names are
    needed for the parser tests below to work.
    """

    A__ : int
    A__ : float
    A__ : str
    A__ : bool
@dataclass
class a_ :
    """Parser fixture with defaults: a plain default and a field() default with help metadata.

    NOTE(review): both fields are named `A__`; only the second survives.
    """

    A__ : int = 42
    A__ : str = field(default='toto' , metadata={'help': 'help message'} )
@dataclass
class a_ :
    """Boolean-flag fixture: default-False, default-True, and tri-state Optional bool.

    NOTE(review): all three fields are named `A__`; only the last survives.
    """

    A__ : bool = False
    A__ : bool = True
    A__ : Optional[bool] = None
class a_ ( a ):
    """String-valued enum fixture (titi/toto).

    NOTE(review): the base class `a` is not defined in this file — given the
    `from enum import Enum` import above, `Enum` looks like the intended base;
    confirm.  Both members are named `A__`, so only one value survives.
    """

    A__ : Tuple = 'titi'
    A__ : Tuple = 'toto'
class a_ ( a ):
    """Mixed-type enum fixture (two strings and the int 42).

    NOTE(review): the base class `a` is undefined here (presumably `Enum`),
    and all three members share the name `A__` — as written this cannot
    reproduce the titi/toto/fourtytwo members the tests below reference.
    """

    A__ : int = 'titi'
    A__ : Dict = 'toto'
    A__ : List[Any] = 42
@dataclass
class a_ :
    """Fixture whose post-init hook coerces the raw field value to the basic enum.

    NOTE(review): `BasicEnum` is not defined under that name in this file (the
    enum above is `a_`), the field is named `A__` while the method reads
    `self.foo`, the method name should presumably be `__post_init__` for the
    coercion to run, and the coerced value is bound to a throwaway local.
    """

    A__ : BasicEnum = "toto"
    def lowerCAmelCase( self : Dict ):
        """Coerce the raw field value into the enum type."""
        snake_case : int = BasicEnum(self.foo )
@dataclass
class a_ :
    """Fixture whose post-init hook coerces the raw field value to the mixed-type enum.

    NOTE(review): same breakage as the fixture above — `MixedTypeEnum` is
    undefined under that name, the field is `A__` while the method reads
    `self.foo`, and the method is not named `__post_init__`.
    """

    A__ : MixedTypeEnum = "toto"
    def lowerCAmelCase( self : Tuple ):
        """Coerce the raw field value into the enum type."""
        snake_case : Optional[Any] = MixedTypeEnum(self.foo )
@dataclass
class a_ :
    """Optional/None-default fixture covering scalar and list fields.

    NOTE(review): the field default `a` is an undefined name (likely `None`),
    and `list_field` is undefined under that name (the helper above is `a_`);
    all fields are named `A__`, so only the last survives.
    """

    A__ : Optional[int] = None
    A__ : Optional[float] = field(default=a , metadata={'help': 'help message'} )
    A__ : Optional[str] = None
    A__ : Optional[List[str]] = list_field(default=[] )
    A__ : Optional[List[int]] = list_field(default=[] )
@dataclass
class a_ :
    """List-field fixture: empty, int, str and float list defaults.

    NOTE(review): `list_field` is undefined under that name in this file (the
    helper above is `a_`); all fields are named `A__`, so only the last survives.
    """

    A__ : List[int] = list_field(default=[] )
    A__ : List[int] = list_field(default=[1, 2, 3] )
    A__ : List[str] = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
    A__ : List[float] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class a_ :
    """Required-field fixture: no defaults, including a required enum field.

    NOTE(review): all fields are named `A__` while the method reads
    `self.required_enum`, and `BasicEnum` is undefined under that name here.
    """

    A__ : List[int] = field()
    A__ : str = field()
    A__ : BasicEnum = field()
    def lowerCAmelCase( self : Dict ):
        """Coerce the required enum field into the enum type."""
        snake_case : Optional[int] = BasicEnum(self.required_enum )
@dataclass
class a_ :
    """Fixture using string (forward-reference) annotations for its field types.

    NOTE(review): `list_field` is undefined under that name here, and all
    fields are named `A__`, so only the last survives.
    """

    A__ : int
    A__ : "BasicEnum" = field()
    A__ : "Optional[bool]" = None
    A__ : "str" = field(default='toto' , metadata={'help': 'help message'} )
    A__ : "List[str]" = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
# PEP 604 (`X | None`) annotation fixtures, only definable on Python >= 3.10.
# NOTE(review): this guard reads `is_python_no_less_than_3_10`, but the version
# flag computed above is bound to `_a` — confirm the intended name.  The same
# `A__` / `a` / `list_field` naming breakage as the fixtures above applies.
if is_python_no_less_than_3_10:
    @dataclass
    class a_ :
        A__ : bool = False
        A__ : bool = True
        A__ : bool | None = None
    @dataclass
    class a_ :
        A__ : int | None = None
        A__ : float | None = field(default=a , metadata={'help': 'help message'} )
        A__ : str | None = None
        A__ : list[str] | None = list_field(default=[] )
        A__ : list[int] | None = list_field(default=[] )
class a_ ( unittest.TestCase ):
    """Unit tests for HfArgumentParser: dataclass-to-argparse mapping plus
    bool/enum/literal/list/optional handling and dict/JSON/YAML parsing.

    NOTE(review): as obfuscated, this class cannot run — every test method is
    named `lowerCAmelCase` (each definition clobbers the previous one),
    several signatures repeat the parameter name `UpperCAmelCase__` (a
    SyntaxError), and many assertions read names (`example`, `args`,
    `enum_ex`, `xx`, `yy`, `parser`, `expected`, ...) that the obfuscated
    `snake_case` assignments never bind.  Only comments are added here;
    restoring the original identifiers requires the upstream file.
    """

    def lowerCAmelCase( self : List[str] , UpperCAmelCase__ : argparse.ArgumentParser , UpperCAmelCase__ : argparse.ArgumentParser ):
        """Assert that two argparse parsers define equivalent actions."""
        self.assertEqual(len(a._actions ) , len(b._actions ) )
        for x, y in zip(a._actions , b._actions ):
            snake_case : Optional[int] = {k: v for k, v in vars(UpperCAmelCase__ ).items() if k != '''container'''}
            snake_case : Tuple = {k: v for k, v in vars(UpperCAmelCase__ ).items() if k != '''container'''}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get('''choices''' , UpperCAmelCase__ ) and yy.get('''choices''' , UpperCAmelCase__ ):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx['''type'''](UpperCAmelCase__ ) , yy['''type'''](UpperCAmelCase__ ) )
                del xx["type"], yy["type"]
            self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
    def lowerCAmelCase( self : List[Any] ):
        """Basic dataclass to CLI mapping, then parsing of concrete argv values."""
        snake_case : List[str] = HfArgumentParser(UpperCAmelCase__ )
        snake_case : Dict = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , type=UpperCAmelCase__ , required=UpperCAmelCase__ )
        expected.add_argument('''--bar''' , type=UpperCAmelCase__ , required=UpperCAmelCase__ )
        expected.add_argument('''--baz''' , type=UpperCAmelCase__ , required=UpperCAmelCase__ )
        expected.add_argument('''--flag''' , type=UpperCAmelCase__ , default=UpperCAmelCase__ , const=UpperCAmelCase__ , nargs='''?''' )
        self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
        snake_case : Dict = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
        (snake_case ) : List[str] = parser.parse_args_into_dataclasses(UpperCAmelCase__ , look_for_args_file=UpperCAmelCase__ )
        self.assertFalse(example.flag )
    def lowerCAmelCase( self : List[Any] ):
        """Defaults taken from plain values and from field(default=..., metadata=...)."""
        snake_case : Optional[Any] = HfArgumentParser(UpperCAmelCase__ )
        snake_case : int = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , default=42 , type=UpperCAmelCase__ )
        expected.add_argument('''--baz''' , default='''toto''' , type=UpperCAmelCase__ , help='''help message''' )
        self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
    def lowerCAmelCase( self : int ):
        """Boolean flags: implicit true, --no_* negation, and tri-state optional bool."""
        snake_case : Tuple = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , type=UpperCAmelCase__ , default=UpperCAmelCase__ , const=UpperCAmelCase__ , nargs='''?''' )
        expected.add_argument('''--baz''' , type=UpperCAmelCase__ , default=UpperCAmelCase__ , const=UpperCAmelCase__ , nargs='''?''' )
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument('''--no_baz''' , action='''store_false''' , default=UpperCAmelCase__ , dest='''baz''' )
        expected.add_argument('''--opt''' , type=UpperCAmelCase__ , default=UpperCAmelCase__ )
        snake_case : int = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(UpperCAmelCase__ )
        for dataclass_type in dataclass_types:
            snake_case : int = HfArgumentParser(UpperCAmelCase__ )
            self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
            snake_case : List[str] = parser.parse_args([] )
            self.assertEqual(UpperCAmelCase__ , Namespace(foo=UpperCAmelCase__ , baz=UpperCAmelCase__ , opt=UpperCAmelCase__ ) )
            snake_case : Tuple = parser.parse_args(['''--foo''', '''--no_baz'''] )
            self.assertEqual(UpperCAmelCase__ , Namespace(foo=UpperCAmelCase__ , baz=UpperCAmelCase__ , opt=UpperCAmelCase__ ) )
            snake_case : Optional[Any] = parser.parse_args(['''--foo''', '''--baz'''] )
            self.assertEqual(UpperCAmelCase__ , Namespace(foo=UpperCAmelCase__ , baz=UpperCAmelCase__ , opt=UpperCAmelCase__ ) )
            snake_case : Union[str, Any] = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
            self.assertEqual(UpperCAmelCase__ , Namespace(foo=UpperCAmelCase__ , baz=UpperCAmelCase__ , opt=UpperCAmelCase__ ) )
            snake_case : str = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
            self.assertEqual(UpperCAmelCase__ , Namespace(foo=UpperCAmelCase__ , baz=UpperCAmelCase__ , opt=UpperCAmelCase__ ) )
    def lowerCAmelCase( self : int ):
        """Mixed-type enum choices map string argv values back onto enum members."""
        snake_case : Optional[Any] = HfArgumentParser(UpperCAmelCase__ )
        snake_case : Optional[Any] = argparse.ArgumentParser()
        expected.add_argument(
            '''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
        self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
        snake_case : str = parser.parse_args([] )
        self.assertEqual(args.foo , '''toto''' )
        snake_case : int = parser.parse_args_into_dataclasses([] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
        snake_case : Optional[Any] = parser.parse_args(['''--foo''', '''titi'''] )
        self.assertEqual(args.foo , '''titi''' )
        snake_case : List[str] = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
        snake_case : Union[str, Any] = parser.parse_args(['''--foo''', '''42'''] )
        self.assertEqual(args.foo , 42 )
        snake_case : Optional[int] = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
    def lowerCAmelCase( self : Tuple ):
        """Literal annotations behave like enum choices."""

        @dataclass
        class a_ :
            A__ : Literal["titi", "toto", 42] = "toto"

        snake_case : List[Any] = HfArgumentParser(UpperCAmelCase__ )
        snake_case : Optional[int] = argparse.ArgumentParser()
        expected.add_argument(
            '''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
        self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
        snake_case : int = parser.parse_args([] )
        self.assertEqual(args.foo , '''toto''' )
        snake_case : Dict = parser.parse_args(['''--foo''', '''titi'''] )
        self.assertEqual(args.foo , '''titi''' )
        snake_case : Tuple = parser.parse_args(['''--foo''', '''42'''] )
        self.assertEqual(args.foo , 42 )
    def lowerCAmelCase( self : Optional[int] ):
        """List fields become nargs='+' arguments with the declared defaults."""
        snake_case : str = HfArgumentParser(UpperCAmelCase__ )
        snake_case : Optional[Any] = argparse.ArgumentParser()
        expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=UpperCAmelCase__ )
        expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=UpperCAmelCase__ )
        expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=UpperCAmelCase__ )
        expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=UpperCAmelCase__ )
        self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
        snake_case : Union[str, Any] = parser.parse_args([] )
        self.assertEqual(
            UpperCAmelCase__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , )
        snake_case : Optional[int] = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() )
        self.assertEqual(UpperCAmelCase__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) )
    def lowerCAmelCase( self : int ):
        """Optional fields default to None and parse their declared types when given."""
        snake_case : List[Any] = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , default=UpperCAmelCase__ , type=UpperCAmelCase__ )
        expected.add_argument('''--bar''' , default=UpperCAmelCase__ , type=UpperCAmelCase__ , help='''help message''' )
        expected.add_argument('''--baz''' , default=UpperCAmelCase__ , type=UpperCAmelCase__ )
        expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=UpperCAmelCase__ )
        expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=UpperCAmelCase__ )
        snake_case : List[str] = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(UpperCAmelCase__ )
        for dataclass_type in dataclass_types:
            snake_case : Dict = HfArgumentParser(UpperCAmelCase__ )
            self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
            snake_case : List[str] = parser.parse_args([] )
            self.assertEqual(UpperCAmelCase__ , Namespace(foo=UpperCAmelCase__ , bar=UpperCAmelCase__ , baz=UpperCAmelCase__ , ces=[] , des=[] ) )
            snake_case : Dict = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
            self.assertEqual(UpperCAmelCase__ , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) )
    def lowerCAmelCase( self : Optional[Any] ):
        """Fields without defaults become required arguments."""
        snake_case : Optional[int] = HfArgumentParser(UpperCAmelCase__ )
        snake_case : str = argparse.ArgumentParser()
        expected.add_argument('''--required_list''' , nargs='''+''' , type=UpperCAmelCase__ , required=UpperCAmelCase__ )
        expected.add_argument('''--required_str''' , type=UpperCAmelCase__ , required=UpperCAmelCase__ )
        expected.add_argument(
            '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=UpperCAmelCase__ , )
        self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
    def lowerCAmelCase( self : Optional[int] ):
        """String (forward-reference) annotations are resolved to real types."""
        snake_case : List[str] = HfArgumentParser(UpperCAmelCase__ )
        snake_case : int = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , type=UpperCAmelCase__ , required=UpperCAmelCase__ )
        expected.add_argument(
            '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=UpperCAmelCase__ , )
        expected.add_argument('''--opt''' , type=UpperCAmelCase__ , default=UpperCAmelCase__ )
        expected.add_argument('''--baz''' , default='''toto''' , type=UpperCAmelCase__ , help='''help message''' )
        expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=UpperCAmelCase__ )
        self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
    def lowerCAmelCase( self : Union[str, Any] ):
        """parse_dict round-trips a plain dict into the dataclass."""
        snake_case : Optional[Any] = HfArgumentParser(UpperCAmelCase__ )
        snake_case : Dict = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
        }
        snake_case : str = parser.parse_dict(UpperCAmelCase__ )[0]
        snake_case : Tuple = BasicExample(**UpperCAmelCase__ )
        self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
    def lowerCAmelCase( self : Optional[int] ):
        """parse_dict rejects unknown keys unless allow_extra_keys is set."""
        snake_case : Union[str, Any] = HfArgumentParser(UpperCAmelCase__ )
        snake_case : Optional[int] = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
            '''extra''': 42,
        }
        self.assertRaises(UpperCAmelCase__ , parser.parse_dict , UpperCAmelCase__ , allow_extra_keys=UpperCAmelCase__ )
    def lowerCAmelCase( self : Optional[Any] ):
        """Round-trip the dataclass through a JSON file on disk."""
        snake_case : Dict = HfArgumentParser(UpperCAmelCase__ )
        snake_case : Any = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            snake_case : List[str] = os.path.join(UpperCAmelCase__ , '''temp_json''' )
            os.mkdir(UpperCAmelCase__ )
            with open(temp_local_path + '''.json''' , '''w+''' ) as f:
                json.dump(UpperCAmelCase__ , UpperCAmelCase__ )
            # NOTE(review): a .json file is parsed with parse_yaml_file —
            # possibly intended parse_json_file, though YAML is a superset of
            # JSON so the call may still succeed.
            snake_case : Tuple = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0]
            snake_case : Dict = BasicExample(**UpperCAmelCase__ )
        self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
    def lowerCAmelCase( self : Optional[Any] ):
        """Round-trip the dataclass through a YAML file on disk."""
        snake_case : Dict = HfArgumentParser(UpperCAmelCase__ )
        snake_case : Union[str, Any] = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            snake_case : List[str] = os.path.join(UpperCAmelCase__ , '''temp_yaml''' )
            os.mkdir(UpperCAmelCase__ )
            with open(temp_local_path + '''.yaml''' , '''w+''' ) as f:
                yaml.dump(UpperCAmelCase__ , UpperCAmelCase__ )
            snake_case : Any = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
            snake_case : int = BasicExample(**UpperCAmelCase__ )
        self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
    def lowerCAmelCase( self : int ):
        """TrainingArguments can be wrapped by HfArgumentParser without error."""
        snake_case : Any = HfArgumentParser(UpperCAmelCase__ )
        self.assertIsNotNone(UpperCAmelCase__ )
| 718 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class a_ ( unittest.TestCase ):
    """Config holder for the LayoutLMv3 image-processor tests below.

    BUG FIX: the __init__ signature repeated the parameter name
    `UpperCAmelCase__` for every argument — a SyntaxError — and bound every
    value to a throwaway local instead of `self`.  The dict-builder method is
    restored to the name `prepare_image_processor_dict`, which the test class
    below calls via the `image_processor_dict` property.
    """

    def __init__(
        self : Optional[Any] ,
        parent ,
        batch_size=7 ,
        num_channels=3 ,
        image_size=18 ,
        min_resolution=30 ,
        max_resolution=400 ,
        do_resize=True ,
        size=None ,
        apply_ocr=True ,
    ):
        # Default target size matches the checked shapes in the tests below.
        self.size = size if size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict( self : int ):
        """Keyword arguments for constructing the image processor under test."""
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class a_ ( a , unittest.TestCase ):
    """Test suite for the LayoutLMv3 image processor.

    Covers attribute presence, ``from_dict`` round-trips, batched
    preprocessing of PIL / numpy / torch inputs, and the Tesseract-OCR
    word/box output.

    NOTE(review): identifiers in this class appear machine-mangled — every
    test method is named ``lowerCAmelCase`` (later defs shadow earlier ones),
    locals are bound to ``snake_case`` but read back under other names, and
    ``UpperCAmelCase__`` is referenced without being defined.  Comments below
    describe the apparent intent; confirm against the upstream transformers
    test before relying on behavior.
    """

    # Image-processor class under test; None when pytesseract is unavailable.
    A__ : List[Any] = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def lowerCAmelCase( self : Dict ):
        """setUp-style hook: build the helper that supplies test dimensions."""
        # NOTE(review): bound to a local, yet later code reads
        # self.image_processor_tester — presumably mangled; confirm.
        snake_case : Optional[Any] = LayoutLMvaImageProcessingTester(self )

    @property
    def lowerCAmelCase( self : Dict ):
        """Kwargs dict used to instantiate the image processor."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def lowerCAmelCase( self : List[Any] ):
        """The processor exposes do_resize, size and apply_ocr attributes."""
        snake_case : List[str] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(UpperCAmelCase__ , '''do_resize''' ) )
        self.assertTrue(hasattr(UpperCAmelCase__ , '''size''' ) )
        self.assertTrue(hasattr(UpperCAmelCase__ , '''apply_ocr''' ) )

    def lowerCAmelCase( self : Optional[int] ):
        """from_dict honors the default size and a size=42 override."""
        snake_case : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
        snake_case : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )

    def lowerCAmelCase( self : str ):
        """Intentional no-op placeholder (overridden base test skipped)."""
        pass

    def lowerCAmelCase( self : Dict ):
        """PIL inputs: single image and batch yield correctly shaped pixel_values plus OCR words/boxes."""
        # Initialize image_processing
        snake_case : Tuple = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        snake_case : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase__ , Image.Image )
        # Test not batched input
        snake_case : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' )
        self.assertEqual(
            encoding.pixel_values.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        # OCR output accompanies the pixel values.
        self.assertIsInstance(encoding.words , UpperCAmelCase__ )
        self.assertIsInstance(encoding.boxes , UpperCAmelCase__ )
        # Test batched
        snake_case : Dict = image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )

    def lowerCAmelCase( self : Union[str, Any] ):
        """numpy inputs: single image and batch yield correctly shaped pixel_values."""
        # Initialize image_processing
        snake_case : List[str] = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        snake_case : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase__ , np.ndarray )
        # Test not batched input
        snake_case : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        # Test batched
        snake_case : List[Any] = image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )

    def lowerCAmelCase( self : Optional[Any] ):
        """torch inputs: single image and batch yield correctly shaped pixel_values."""
        # Initialize image_processing
        snake_case : str = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        snake_case : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )
        # Test not batched input
        snake_case : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        # Test batched
        snake_case : Tuple = image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )

    def lowerCAmelCase( self : Optional[Any] ):
        """Integration test on a DocVQA fixture: OCR words/boxes must match
        the Tesseract 4.1.1 reference, and apply_ocr=False must return only
        pixel values."""
        # with apply_OCR = True
        snake_case : int = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        snake_case : List[Any] = load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' )
        snake_case : List[Any] = Image.open(ds[0]['''file'''] ).convert('''RGB''' )
        snake_case : Any = image_processing(UpperCAmelCase__ , return_tensors='''pt''' )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
        self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        snake_case : Optional[Any] = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', 
        '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
        snake_case : Union[str, Any] = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 
        447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
        # fmt: on
        self.assertListEqual(encoding.words , UpperCAmelCase__ )
        self.assertListEqual(encoding.boxes , UpperCAmelCase__ )
        # with apply_OCR = False
        snake_case : str = LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase__ )
        snake_case : Optional[Any] = image_processing(UpperCAmelCase__ , return_tensors='''pt''' )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 84 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
# Make torch/cuDNN fully deterministic so pinned image slices are reproducible.
enable_full_determinism()
class a_ ( a , unittest.TestCase ):
    """Fast CPU tests for LDMTextToImagePipeline built from tiny random components.

    NOTE(review): identifiers appear machine-mangled (all methods named
    ``lowerCAmelCase``, locals bound to ``snake_case`` but read back under
    other names, ``UpperCAmelCase__`` placeholders); comments describe
    apparent intent — confirm against the upstream diffusers test.
    """

    # Pipeline class under test and the parameter sets the shared mixin checks.
    A__ : Union[str, Any] = LDMTextToImagePipeline
    A__ : Dict = TEXT_TO_IMAGE_PARAMS - {
        'negative_prompt',
        'negative_prompt_embeds',
        'cross_attention_kwargs',
        'prompt_embeds',
    }
    A__ : List[Any] = PipelineTesterMixin.required_optional_params - {
        'num_images_per_prompt',
        'callback',
        'callback_steps',
    }
    A__ : str = TEXT_TO_IMAGE_BATCH_PARAMS
    A__ : Any = False

    def lowerCAmelCase( self : List[Any] ):
        """Build tiny, fixed-seed UNet / DDIM scheduler / VAE / CLIP components."""
        torch.manual_seed(0 )
        snake_case : List[Any] = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        snake_case : int = DDIMScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=UpperCAmelCase__ , set_alpha_to_one=UpperCAmelCase__ , )
        torch.manual_seed(0 )
        snake_case : List[Any] = AutoencoderKL(
            block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , latent_channels=4 , )
        torch.manual_seed(0 )
        snake_case : Any = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        snake_case : Tuple = CLIPTextModel(UpperCAmelCase__ )
        snake_case : int = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        # NOTE(review): the dict reads names (unet, scheduler, ...) that the
        # snake_case locals above were presumably meant to carry.
        snake_case : Tuple = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vqvae''': vae,
            '''bert''': text_encoder,
            '''tokenizer''': tokenizer,
        }
        return components

    def lowerCAmelCase( self : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int]=0 ):
        """Deterministic call kwargs; a CPU generator is used on MPS devices."""
        if str(UpperCAmelCase__ ).startswith('''mps''' ):
            snake_case : List[Any] = torch.manual_seed(UpperCAmelCase__ )
        else:
            snake_case : Optional[Any] = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
        snake_case : Optional[Any] = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
        }
        return inputs

    def lowerCAmelCase( self : str ):
        """Two-step CPU generation must match a pinned 3x3 image slice."""
        snake_case : str = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        snake_case : Optional[Any] = self.get_dummy_components()
        snake_case : Optional[Any] = LDMTextToImagePipeline(**UpperCAmelCase__ )
        pipe.to(UpperCAmelCase__ )
        pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
        snake_case : List[str] = self.get_dummy_inputs(UpperCAmelCase__ )
        snake_case : Tuple = pipe(**UpperCAmelCase__ ).images
        snake_case : int = image[0, -3:, -3:, -1]
        assert image.shape == (1, 16, 16, 3)
        snake_case : Union[str, Any] = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
    """Slow GPU integration tests for LDMTextToImagePipeline using the full
    CompVis/ldm-text2im-large-256 checkpoint.

    NOTE(review): names appear machine-mangled (see sibling classes);
    comments describe apparent intent.
    """

    def lowerCAmelCase( self : Optional[int] ):
        """tearDown-style hook: release GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowerCAmelCase( self : str , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int=torch.floataa , UpperCAmelCase__ : int=0 ):
        """Build deterministic inputs: seeded generator plus fixed (1,4,32,32) latents."""
        snake_case : Any = torch.manual_seed(UpperCAmelCase__ )
        snake_case : str = np.random.RandomState(UpperCAmelCase__ ).standard_normal((1, 4, 32, 32) )
        snake_case : str = torch.from_numpy(UpperCAmelCase__ ).to(device=UpperCAmelCase__ , dtype=UpperCAmelCase__ )
        snake_case : Optional[Any] = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''latents''': latents,
            '''generator''': generator,
            '''num_inference_steps''': 3,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
        }
        return inputs

    def lowerCAmelCase( self : Any ):
        """Three-step generation must produce 256x256 output matching a pinned slice."""
        snake_case : int = LDMTextToImagePipeline.from_pretrained('''CompVis/ldm-text2im-large-256''' ).to(UpperCAmelCase__ )
        pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
        snake_case : Any = self.get_inputs(UpperCAmelCase__ )
        snake_case : Dict = pipe(**UpperCAmelCase__ ).images
        snake_case : List[str] = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 256, 256, 3)
        snake_case : List[Any] = np.array([0.5_1825, 0.5_2850, 0.5_2543, 0.5_4258, 0.5_2304, 0.5_2569, 0.5_4363, 0.5_5276, 0.5_6878] )
        snake_case : str = np.abs(expected_slice - image_slice ).max()
        assert max_diff < 1e-3
@nightly
@require_torch_gpu
class a_ ( unittest.TestCase ):
    """Nightly GPU tests for LDMTextToImagePipeline: a full 50-step run is
    compared against a stored reference image.

    NOTE(review): names appear machine-mangled (see sibling classes);
    comments describe apparent intent.
    """

    def lowerCAmelCase( self : Tuple ):
        """tearDown-style hook: release GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowerCAmelCase( self : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any]=torch.floataa , UpperCAmelCase__ : Union[str, Any]=0 ):
        """Build deterministic inputs: seeded generator plus fixed (1,4,32,32) latents."""
        snake_case : Tuple = torch.manual_seed(UpperCAmelCase__ )
        snake_case : List[Any] = np.random.RandomState(UpperCAmelCase__ ).standard_normal((1, 4, 32, 32) )
        snake_case : str = torch.from_numpy(UpperCAmelCase__ ).to(device=UpperCAmelCase__ , dtype=UpperCAmelCase__ )
        snake_case : Optional[int] = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''latents''': latents,
            '''generator''': generator,
            '''num_inference_steps''': 50,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
        }
        return inputs

    def lowerCAmelCase( self : Optional[int] ):
        """Full 50-step generation must match the stored reference array within 1e-3."""
        snake_case : Optional[Any] = LDMTextToImagePipeline.from_pretrained('''CompVis/ldm-text2im-large-256''' ).to(UpperCAmelCase__ )
        pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
        snake_case : List[Any] = self.get_inputs(UpperCAmelCase__ )
        snake_case : List[str] = pipe(**UpperCAmelCase__ ).images[0]
        snake_case : str = load_numpy(
            '''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy''' )
        snake_case : Any = np.abs(expected_image - image ).max()
        assert max_diff < 1e-3
| 719 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
# Module-level logger (diffusers convention).
_a : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
_a : Dict = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... 
prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def a_(height, width, scale_factor=8):
    """Compute the latent grid size for a target image size.

    Each output dimension is ``ceil(dim / scale_factor**2) * scale_factor``,
    i.e. the image dimension is rounded up to a multiple of
    ``scale_factor**2`` and then divided by ``scale_factor``.

    The previous version declared all three parameters with the same name
    (a SyntaxError) and incremented/returned the undefined names
    ``new_height``/``new_width``; this restores the working logic.  Callers
    pass arguments positionally, so the signature change is compatible.

    Args:
        height: target image height in pixels.
        width: target image width in pixels.
        scale_factor: VAE spatial downscale factor (default 8).

    Returns:
        Tuple ``(new_height, new_width)`` of latent dimensions.
    """
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        # Round up when the height is not an exact multiple.
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        # Round up when the width is not an exact multiple.
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class a_ ( a ):
    """Kandinsky 2.2 ControlNet text-to-image decoder pipeline.

    Combines a conditional UNet, a DDPM scheduler and a MoVQ (VQ) decoder.
    Conditioning is supplied as CLIP image embeddings plus a ControlNet
    "hint" tensor (e.g. a depth map).

    NOTE(review): identifiers in this class appear machine-mangled (duplicate
    ``UpperCAmelCase__`` parameter names, locals bound to ``snake_case`` but
    read back under other names).  Comments below describe the apparent
    intent from the upstream diffusers pipeline — confirm before relying on
    behavior.
    """

    def __init__( self : Optional[int] , UpperCAmelCase__ : UNetaDConditionModel , UpperCAmelCase__ : DDPMScheduler , UpperCAmelCase__ : VQModel , ):
        """Register unet / scheduler / movq modules and cache the decoder scale factor."""
        super().__init__()
        self.register_modules(
            unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ , movq=UpperCAmelCase__ , )
        # 2 ** (num VAE blocks - 1): spatial downscale factor of the MoVQ decoder.
        snake_case : List[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)

    def lowerCAmelCase( self : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any ):
        """Draw fresh noise latents, or validate and move user-provided ones,
        then scale by the scheduler's initial noise sigma."""
        if latents is None:
            snake_case : int = randn_tensor(UpperCAmelCase__ , generator=UpperCAmelCase__ , device=UpperCAmelCase__ , dtype=UpperCAmelCase__ )
        else:
            if latents.shape != shape:
                raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}" )
            snake_case : Optional[Any] = latents.to(UpperCAmelCase__ )
        snake_case : List[Any] = latents * scheduler.init_noise_sigma
        return latents

    def lowerCAmelCase( self : Dict , UpperCAmelCase__ : Optional[int]=0 ):
        """Offload unet and movq to CPU via accelerate's cpu_offload (sequential)."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('''Please install accelerate via `pip install accelerate`''' )
        snake_case : Union[str, Any] = torch.device(F"cuda:{gpu_id}" )
        snake_case : Dict = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(UpperCAmelCase__ , UpperCAmelCase__ )

    def lowerCAmelCase( self : List[Any] , UpperCAmelCase__ : Any=0 ):
        """Model-level CPU offload (accelerate >= 0.17): each submodule is moved
        to GPU only while it runs; the last hook is kept for manual offload."""
        if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
        snake_case : Optional[int] = torch.device(F"cuda:{gpu_id}" )
        if self.device.type != "cpu":
            self.to('''cpu''' , silence_dtype_warnings=UpperCAmelCase__ )
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        snake_case : List[str] = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            snake_case , snake_case : Optional[int] = cpu_offload_with_hook(UpperCAmelCase__ , UpperCAmelCase__ , prev_module_hook=UpperCAmelCase__ )
        # We'll offload the last model manually.
        snake_case : Tuple = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def lowerCAmelCase( self : Union[str, Any] ):
        """Device the pipeline actually executes on — honors accelerate hooks
        when model offloading is enabled, else the unet's device."""
        if not hasattr(self.unet , '''_hf_hook''' ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(UpperCAmelCase__ , '''_hf_hook''' )
                and hasattr(module._hf_hook , '''execution_device''' )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device

    @torch.no_grad()
    @replace_example_docstring(UpperCAmelCase__ )
    def __call__( self : List[str] , UpperCAmelCase__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCAmelCase__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCAmelCase__ : torch.FloatTensor , UpperCAmelCase__ : int = 512 , UpperCAmelCase__ : int = 512 , UpperCAmelCase__ : int = 100 , UpperCAmelCase__ : float = 4.0 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase__ : Optional[torch.FloatTensor] = None , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , ):
        """Run the denoising loop.

        Takes positive/negative CLIP image embeddings and a ControlNet hint,
        performs classifier-free-guided DDPM sampling in latent space, and
        decodes with the MoVQ decoder.  Returns an ImagePipelineOutput (or a
        plain tuple when return_dict is falsy).
        """
        snake_case : Optional[int] = self._execution_device
        snake_case : Union[str, Any] = guidance_scale > 1.0
        # Lists of embeddings/hints are concatenated along the batch axis.
        if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
            snake_case : Any = torch.cat(UpperCAmelCase__ , dim=0 )
        if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
            snake_case : Union[str, Any] = torch.cat(UpperCAmelCase__ , dim=0 )
        if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
            snake_case : int = torch.cat(UpperCAmelCase__ , dim=0 )
        snake_case : List[Any] = image_embeds.shape[0] * num_images_per_prompt
        if do_classifier_free_guidance:
            # Duplicate embeddings per requested image and stack
            # [negative, positive] for one batched UNet pass.
            snake_case : Dict = image_embeds.repeat_interleave(UpperCAmelCase__ , dim=0 )
            snake_case : Optional[Any] = negative_image_embeds.repeat_interleave(UpperCAmelCase__ , dim=0 )
            snake_case : Tuple = hint.repeat_interleave(UpperCAmelCase__ , dim=0 )
            snake_case : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCAmelCase__ )
            snake_case : List[str] = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCAmelCase__ )
        self.scheduler.set_timesteps(UpperCAmelCase__ , device=UpperCAmelCase__ )
        snake_case : str = self.scheduler.timesteps
        snake_case : Optional[Any] = self.movq.config.latent_channels
        snake_case , snake_case : Optional[Any] = downscale_height_and_width(UpperCAmelCase__ , UpperCAmelCase__ , self.movq_scale_factor )
        # create initial latent
        snake_case : Dict = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , self.scheduler , )
        for i, t in enumerate(self.progress_bar(UpperCAmelCase__ ) ):
            # expand the latents if we are doing classifier free guidance
            snake_case : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            snake_case : Optional[int] = {'''image_embeds''': image_embeds, '''hint''': hint}
            snake_case : Any = self.unet(
                sample=UpperCAmelCase__ , timestep=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , added_cond_kwargs=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , )[0]
            if do_classifier_free_guidance:
                # Split off the learned-variance channels, then combine the
                # unconditional/conditional halves with the guidance scale.
                snake_case , snake_case : Dict = noise_pred.split(latents.shape[1] , dim=1 )
                snake_case , snake_case : Any = noise_pred.chunk(2 )
                snake_case , snake_case : Dict = variance_pred.chunk(2 )
                snake_case : str = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                snake_case : List[str] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , '''variance_type''' )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                # Scheduler does not consume predicted variance — drop it.
                snake_case , snake_case : Tuple = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            snake_case : List[Any] = self.scheduler.step(
                UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , generator=UpperCAmelCase__ , )[0]
        # post-processing
        snake_case : List[Any] = self.movq.decode(UpperCAmelCase__ , force_not_quantize=UpperCAmelCase__ )['''sample''']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
        if output_type in ["np", "pil"]:
            # Map from [-1, 1] to [0, 1] and move to HWC numpy layout.
            snake_case : Optional[Any] = image * 0.5 + 0.5
            snake_case : int = image.clamp(0 , 1 )
            snake_case : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            snake_case : str = self.numpy_to_pil(UpperCAmelCase__ )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=UpperCAmelCase__ )
| 84 | 0 |
from __future__ import annotations
def a_(__magic_name__) -> bool:
    """Return True if the decimal representation of the argument is a palindrome.

    The previous body assigned ``str(__magic_name__)`` to one name but
    compared an undefined name ``n`` — a guaranteed NameError; the local is
    now used consistently.

    Args:
        __magic_name__: the integer (or any value with a stable str()) to test.

    Returns:
        True when the string form reads the same forwards and backwards.
    """
    digits = str(__magic_name__)
    return digits == digits[::-1]
def a_(__magic_name__=1_000_000) -> int:
    """Project Euler 36: sum all numbers below the bound that are palindromic
    in both base 10 and base 2.

    The previous body called an undefined name ``is_palindrome`` (the helper
    in this file was renamed ``a_`` and is shadowed by this very function),
    so the palindrome check is inlined via a private helper.  The return
    annotation was also corrected from ``List[Any]`` to ``int``.

    Args:
        __magic_name__: exclusive upper bound (default 1,000,000).

    Returns:
        The sum of all double-base palindromes below the bound.
    """
    def _is_pal(s: str) -> bool:
        # Palindrome test on an already-stringified value.
        return s == s[::-1]

    total = 0
    for i in range(1, __magic_name__):
        # bin(i) starts with the '0b' prefix; slice it off before testing.
        if _is_pal(str(i)) and _is_pal(bin(i)[2:]):
            total += i
    return total
if __name__ == "__main__":
    # Read an integer bound from stdin and print the double-base palindrome sum.
    # The previous line called an undefined name ``solution``; the function in
    # this file is named ``a_``.  The redundant str() around input() (input()
    # already returns a str) was dropped.
    print(a_(int(input().strip())))
| 720 |
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# Path to the tiny SentencePiece model checked into the test fixtures.
_a : List[Any] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class a_ ( a , unittest.TestCase ):
A__ : Dict = ReformerTokenizer
A__ : Optional[int] = ReformerTokenizerFast
A__ : str = True
A__ : Tuple = False
A__ : str = True
def lowerCAmelCase( self : List[Any] ):
"""simple docstring"""
super().setUp()
snake_case : str = ReformerTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase( self : Any ):
"""simple docstring"""
snake_case : int = '''<s>'''
snake_case : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ )
def lowerCAmelCase( self : Optional[Any] ):
"""simple docstring"""
snake_case : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(UpperCAmelCase__ ) , 1_000 )
def lowerCAmelCase( self : List[Any] ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
def lowerCAmelCase( self : Dict ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
snake_case : Any = self.get_tokenizer()
snake_case : str = self.get_rust_tokenizer()
snake_case : Tuple = '''I was born in 92000, and this is falsé.'''
snake_case : str = tokenizer.tokenize(UpperCAmelCase__ )
snake_case : int = rust_tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
snake_case : Union[str, Any] = tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
snake_case : List[str] = rust_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
snake_case : List[str] = self.get_rust_tokenizer()
snake_case : Optional[int] = tokenizer.encode(UpperCAmelCase__ )
snake_case : Optional[Any] = rust_tokenizer.encode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCAmelCase( self : Dict , UpperCAmelCase__ : List[Any]=15 ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case : str = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
# Simple input
snake_case : Union[str, Any] = '''This is a simple input'''
snake_case : List[str] = ['''This is a simple input 1''', '''This is a simple input 2''']
snake_case : int = ('''This is a simple input''', '''This is a pair''')
snake_case : int = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Simple input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Simple input
self.assertRaises(
UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' , )
# Pair input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Pair input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Pair input
self.assertRaises(
UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' , )
def lowerCAmelCase( self : str ):
"""simple docstring"""
pass
def lowerCAmelCase( self : Union[str, Any] ):
    """End-to-end SentencePiece check: tokenize, ids round-trip, out-of-vocab chars -> <unk>.

    NOTE(review): ``UpperCAmelCase__`` stands in for both the vocab path and every
    intermediate result — names look machine-mangled; verify against upstream.
    """
    snake_case : Union[str, Any] = ReformerTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ )
    snake_case : List[str] = tokenizer.tokenize('''This is a test''' )
    self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
    self.assertListEqual(
        tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [285, 46, 10, 170, 382] , )
    snake_case : int = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
    # Expected subword pieces; '9' and 'é' survive here as raw pieces.
    self.assertListEqual(
        UpperCAmelCase__ , [
            SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''',
            SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''',
            SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''',
            SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''',
        ] , )
    snake_case : int = tokenizer.convert_tokens_to_ids(UpperCAmelCase__ )
    # Id 0 is <unk>: '9' and 'é' are not in the vocab.
    self.assertListEqual(
        UpperCAmelCase__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
    snake_case : List[Any] = tokenizer.convert_ids_to_tokens(UpperCAmelCase__ )
    # Converting ids back maps the unknown pieces to the literal '<unk>' token.
    self.assertListEqual(
        UpperCAmelCase__ , [
            SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''',
            SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''',
            SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''',
            SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''',
        ] , )
@cached_property
def lowerCAmelCase( self : Tuple ):
    """Lazily load and cache the pretrained Reformer tokenizer used by the slow tests."""
    return ReformerTokenizer.from_pretrained('''google/reformer-crime-and-punishment''' )
@slow
def lowerCAmelCase( self : List[str] ):
    """Pin the encoding of a short string against known-good token ids."""
    snake_case : Any = '''Hello World!'''
    # Golden ids for the pretrained crime-and-punishment vocab.
    snake_case : Optional[Any] = [126, 32, 262, 152, 38, 72, 287]
    self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) )
@slow
def lowerCAmelCase( self : Optional[Any] ):
    """Pin the encoding of a long, symbol-heavy string; id 0 marks <unk> pieces."""
    snake_case : Optional[Any] = (
        '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
        ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
    )
    # fmt: off
    # Golden token ids; the 0 entries are <unk> for out-of-vocab symbols.
    snake_case : Dict = [
        108, 265, 24, 111, 4, 258, 156, 35, 28, 275, 3, 259, 297, 260, 84, 4, 35, 110, 44, 8,
        259, 91, 268, 21, 11, 209, 274, 109, 266, 277, 117, 86, 93, 315, 258, 278, 258, 277, 258, 0,
        258, 288, 258, 319, 258, 0, 258, 0, 258, 0, 258, 0, 258, 287, 258, 315, 258, 289, 258, 278,
        99, 269, 266, 262, 8, 259, 241, 4, 217, 230, 268, 266, 55, 168, 106, 75, 193, 266, 223, 27,
        49, 26, 282, 25, 264, 299, 19, 26, 0, 258, 277, 117, 86, 93, 176, 183, 270, 11, 262, 42,
        61, 265,
    ]
    # fmt: on
    self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) )
@require_torch
@slow
def lowerCAmelCase( self : List[Any] ):
    """Smoke-test that tokenizer output feeds a ReformerModel forward pass without error.

    NOTE(review): ``sequence``/``encoded_sequence``/``model`` are referenced but the
    assignments go to throwaway ``snake_case`` names — machine-mangled; confirm upstream.
    """
    import torch
    from transformers import ReformerConfig, ReformerModel
    # Build sequence from the first ten vocabulary entries.
    snake_case : Any = list(self.big_tokenizer.get_vocab().keys() )[:10]
    snake_case : Union[str, Any] = ''' '''.join(UpperCAmelCase__ )
    snake_case : Optional[int] = self.big_tokenizer.encode_plus(UpperCAmelCase__ , return_tensors='''pt''' )
    snake_case : List[str] = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors='''pt''' )
    snake_case : Optional[Any] = ReformerConfig()
    # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
    snake_case : Tuple = encoded_sequence['''input_ids'''].shape
    snake_case : List[Any] = ReformerModel(UpperCAmelCase__ )
    # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
    assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
    with torch.no_grad():
        model(**UpperCAmelCase__ )
        model(**UpperCAmelCase__ )
@slow
def lowerCAmelCase( self : Optional[int] ):
    """Golden-output integration test pinned to a fixed model revision."""
    # fmt: off
    snake_case : Tuple = {'''input_ids''': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
    # fmt: on
    # This tokenizer does not know some characters like ")".
    # That is the reason why we use very simple texts here.
    # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
    snake_case : Tuple = [
        '''This is a very simple sentence.''',
        '''The quick brown fox jumps over the lazy dog.''',
    ]
    self.tokenizer_integration_test_util(
        expected_encoding=UpperCAmelCase__ , model_name='''google/reformer-crime-and-punishment''' , revision='''0e6c3decb8211d49bf881013425dc8b0448b3f5a''' , padding=UpperCAmelCase__ , sequences=UpperCAmelCase__ , )
| 84 | 0 |
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class a_ ( a ):
    """Custom config type used below to exercise AutoConfig / auto-model registration."""

    # Registered model-type identifier (see AutoConfig.register('new-model', ...) in the tests).
    A__ : List[Any] = 'new-model'
if is_tf_available():
    class a_ ( a ):
        """TF model stub paired with the custom config above via its config-class attribute.

        NOTE(review): identifiers are machine-mangled (this class shares the name ``a_``
        with the config class above) — confirm against upstream test_modeling_tf_auto.
        """

        A__ : str = NewModelConfig
@require_tf
class a_ ( unittest.TestCase ):
    """Integration tests for the TF auto-model factories (from_pretrained / from_config / register).

    NOTE(review): identifiers look machine-mangled — results are bound to throwaway
    ``snake_case`` locals while assertions reference ``UpperCAmelCase__``, ``model`` and
    ``tiny_config``; structure mirrors upstream test_modeling_tf_auto.py — confirm there.
    """

    @slow
    def lowerCAmelCase( self : str ):
        """TFAutoModel round-trip for a BERT checkpoint."""
        snake_case : Optional[int] = '''bert-base-cased'''
        snake_case : Dict = AutoConfig.from_pretrained(UpperCAmelCase__ )
        self.assertIsNotNone(UpperCAmelCase__ )
        self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
        snake_case : List[Any] = TFAutoModel.from_pretrained(UpperCAmelCase__ )
        self.assertIsNotNone(UpperCAmelCase__ )
        self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )

    @slow
    def lowerCAmelCase( self : Union[str, Any] ):
        """TFAutoModelForPreTraining round-trip for a BERT checkpoint."""
        snake_case : Tuple = '''bert-base-cased'''
        snake_case : List[Any] = AutoConfig.from_pretrained(UpperCAmelCase__ )
        self.assertIsNotNone(UpperCAmelCase__ )
        self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
        snake_case : List[str] = TFAutoModelForPreTraining.from_pretrained(UpperCAmelCase__ )
        self.assertIsNotNone(UpperCAmelCase__ )
        self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )

    @slow
    def lowerCAmelCase( self : str ):
        """TFAutoModelForCausalLM round-trip (with output_loading_info) for GPT-2."""
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            snake_case : List[Any] = AutoConfig.from_pretrained(UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
            snake_case : List[Any] = TFAutoModelForCausalLM.from_pretrained(UpperCAmelCase__ )
            snake_case : List[str] = TFAutoModelForCausalLM.from_pretrained(UpperCAmelCase__ , output_loading_info=UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )

    @slow
    def lowerCAmelCase( self : List[Any] ):
        """TFAutoModelWithLMHead round-trip for BERT."""
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            snake_case : List[Any] = AutoConfig.from_pretrained(UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
            snake_case : str = TFAutoModelWithLMHead.from_pretrained(UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )

    @slow
    def lowerCAmelCase( self : List[str] ):
        """TFAutoModelForMaskedLM round-trip (with output_loading_info) for BERT."""
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            snake_case : Any = AutoConfig.from_pretrained(UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
            snake_case : Tuple = TFAutoModelForMaskedLM.from_pretrained(UpperCAmelCase__ )
            snake_case : Any = TFAutoModelForMaskedLM.from_pretrained(UpperCAmelCase__ , output_loading_info=UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )

    @slow
    def lowerCAmelCase( self : List[str] ):
        """TFAutoModelForSeq2SeqLM round-trip (with output_loading_info) for T5."""
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            snake_case : int = AutoConfig.from_pretrained(UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
            snake_case : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(UpperCAmelCase__ )
            snake_case : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(UpperCAmelCase__ , output_loading_info=UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )

    @slow
    def lowerCAmelCase( self : List[str] ):
        """TFAutoModelForSequenceClassification round-trip for BERT."""
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            snake_case : Any = AutoConfig.from_pretrained(UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
            snake_case : Optional[int] = TFAutoModelForSequenceClassification.from_pretrained(UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )

    @slow
    def lowerCAmelCase( self : int ):
        """TFAutoModelForQuestionAnswering round-trip for BERT."""
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            snake_case : Tuple = AutoConfig.from_pretrained(UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
            snake_case : int = TFAutoModelForQuestionAnswering.from_pretrained(UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )

    @slow
    @require_tensorflow_probability
    def lowerCAmelCase( self : Optional[int] ):
        """TFAutoModelForTableQuestionAnswering round-trip for TAPAS."""
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            snake_case : Tuple = AutoConfig.from_pretrained(UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
            snake_case : Optional[Any] = TFAutoModelForTableQuestionAnswering.from_pretrained(UpperCAmelCase__ )
            snake_case : Tuple = TFAutoModelForTableQuestionAnswering.from_pretrained(
                UpperCAmelCase__ , output_loading_info=UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )

    def lowerCAmelCase( self : int ):
        """Parameter counting on a tiny LM-head checkpoint (all parameters trainable)."""
        snake_case : int = TFAutoModelWithLMHead.from_pretrained(UpperCAmelCase__ )
        self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=UpperCAmelCase__ ) , 14_410 )

    def lowerCAmelCase( self : Any ):
        """Same parameter-count check via a second tiny checkpoint identifier."""
        snake_case : List[str] = TFAutoModelWithLMHead.from_pretrained(UpperCAmelCase__ )
        self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=UpperCAmelCase__ ) , 14_410 )

    def lowerCAmelCase( self : Optional[int] ):
        """config.architectures override forces TFFunnelBaseModel on from_config and reload."""
        snake_case : int = TFAutoModel.from_pretrained('''sgugger/funnel-random-tiny''' )
        self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
        snake_case : Dict = copy.deepcopy(model.config )
        snake_case : str = ['''FunnelBaseModel''']
        snake_case : Dict = TFAutoModel.from_config(UpperCAmelCase__ )
        self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(UpperCAmelCase__ )
            snake_case : Any = TFAutoModel.from_pretrained(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )

    def lowerCAmelCase( self : List[str] ):
        """Register a new config/model pair with every auto class, then restore the mappings."""
        try:
            AutoConfig.register('''new-model''' , UpperCAmelCase__ )
            snake_case : Optional[Any] = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]
            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__ ):
                    # Wrong config class will raise an error
                    with self.assertRaises(UpperCAmelCase__ ):
                        auto_class.register(UpperCAmelCase__ , UpperCAmelCase__ )
                    auto_class.register(UpperCAmelCase__ , UpperCAmelCase__ )
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(UpperCAmelCase__ ):
                        auto_class.register(UpperCAmelCase__ , UpperCAmelCase__ )
                    # Now that the config is registered, it can be used as any other config with the auto-API
                    snake_case : Dict = BertModelTester(self ).get_config()
                    snake_case : Union[str, Any] = NewModelConfig(**tiny_config.to_dict() )
                    snake_case : List[Any] = auto_class.from_config(UpperCAmelCase__ )
                    self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(UpperCAmelCase__ )
                        snake_case : int = auto_class.from_pretrained(UpperCAmelCase__ )
                        self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
        finally:
            # Undo the registrations so other tests see pristine mappings.
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]

    def lowerCAmelCase( self : Optional[Any] ):
        """A nonexistent repo id produces a clear error message."""
        with self.assertRaisesRegex(
            UpperCAmelCase__ , '''bert-base is not a local folder and is not a valid model identifier''' ):
            snake_case : int = TFAutoModel.from_pretrained('''bert-base''' )

    def lowerCAmelCase( self : Optional[Any] ):
        """An invalid revision produces a clear error message."""
        with self.assertRaisesRegex(
            UpperCAmelCase__ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
            snake_case : str = TFAutoModel.from_pretrained(UpperCAmelCase__ , revision='''aaaaaa''' )

    def lowerCAmelCase( self : List[Any] ):
        """A config-only repo (no weights file) produces a clear error message."""
        with self.assertRaisesRegex(
            UpperCAmelCase__ , '''hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin''' , ):
            snake_case : List[str] = TFAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' )

    def lowerCAmelCase( self : Tuple ):
        """Loading PyTorch-only weights without from_pt=True produces a clear error."""
        with self.assertRaisesRegex(UpperCAmelCase__ , '''Use `from_pt=True` to load this model''' ):
            snake_case : List[Any] = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )

    def lowerCAmelCase( self : str ):
        """A second from_pretrained hits only the cache (one HEAD request, no GETs)."""
        snake_case : Union[str, Any] = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
        with RequestCounter() as counter:
            snake_case : Dict = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
        self.assertEqual(counter.get_request_count , 0 )
        self.assertEqual(counter.head_request_count , 1 )
        self.assertEqual(counter.other_request_count , 0 )
        # With a sharded checkpoint
        snake_case : Tuple = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
        with RequestCounter() as counter:
            snake_case : Optional[int] = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
        self.assertEqual(counter.get_request_count , 0 )
        self.assertEqual(counter.head_request_count , 1 )
        self.assertEqual(counter.other_request_count , 0 )
| 721 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def a_ ( __magic_name__ ) -> Tuple:
    """Convert a PIL image to a [-1, 1]-scaled NCHW float tensor, rounding H/W down to multiples of 32.

    NOTE(review): the obfuscated original discarded each intermediate into throwaway
    names and then referenced undefined ``image``/``w``/``h`` (NameError), and used the
    nonexistent ``np.floataa``; restored to the standard diffusers preprocess helper.

    Args:
        __magic_name__: a ``PIL.Image.Image`` in RGB mode.

    Returns:
        A ``torch.FloatTensor`` of shape (1, 3, H, W) with values in [-1, 1].
    """
    image = __magic_name__
    w, h = image.size
    # Round each side down to the nearest multiple of 32 (UNet/VQ-VAE stride requirement).
    w, h = (x - x % 32 for x in (w, h))
    image = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] )
    image = np.array(image ).astype(np.float32 ) / 255.0
    # HWC -> NCHW with a leading batch axis.
    image = image[None].transpose(0 , 3 , 1 , 2 )
    image = torch.from_numpy(image )
    # Map [0, 1] -> [-1, 1].
    return 2.0 * image - 1.0
class a_ ( a ):
    """Latent super-resolution diffusion pipeline: VQ-VAE decoder + UNet denoiser + scheduler.

    NOTE(review): the obfuscated original gave ``__init__`` and ``__call__`` duplicate
    parameter names (a SyntaxError) and referenced undefined locals; restored to the
    standard LDM super-resolution pipeline layout — confirm against upstream diffusers.
    """

    def __init__( self , vqvae: VQModel , unet: UNetaDModel , scheduler: Union[
        DDIMScheduler,
        PNDMScheduler,
        LMSDiscreteScheduler,
        EulerDiscreteScheduler,
        EulerAncestralDiscreteScheduler,
        DPMSolverMultistepScheduler,
    ] , ):
        """Register the three sub-modules on the pipeline."""
        super().__init__()
        self.register_modules(vqvae=vqvae , unet=unet , scheduler=scheduler )

    @staticmethod
    def _preprocess( image ):
        """Resize a PIL image down to multiples of 32 and convert to a [-1, 1] NCHW tensor."""
        w, h = image.size
        w, h = (x - x % 32 for x in (w, h))
        image = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] )
        image = np.array(image ).astype(np.float32 ) / 255.0
        image = image[None].transpose(0 , 3 , 1 , 2 )
        return 2.0 * torch.from_numpy(image ) - 1.0

    @torch.no_grad()
    def __call__( self , image: Union[torch.Tensor, PIL.Image.Image] = None , batch_size: Optional[int] = 1 , num_inference_steps: Optional[int] = 100 , eta: Optional[float] = 0.0 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , ):
        """Run super-resolution diffusion on ``image``.

        Returns:
            ImagePipelineOutput with the upscaled image(s), or a 1-tuple when
            ``return_dict`` is False.

        Raises:
            ValueError: if ``image`` is neither a PIL image nor a torch tensor.
        """
        if isinstance(image , PIL.Image.Image ):
            batch_size = 1
        elif isinstance(image , torch.Tensor ):
            batch_size = image.shape[0]
        else:
            raise ValueError(F"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image )}" )
        if isinstance(image , PIL.Image.Image ):
            image = self._preprocess(image )
        height, width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters() ).dtype
        latents = randn_tensor(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
        image = image.to(device=self.device , dtype=latents_dtype )
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # eta (η) is only used with the DDIMScheduler and should be in [0, 1];
        # it is ignored by schedulers whose step() does not accept it.
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(timesteps_tensor ):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image] , dim=1 )
            latents_input = self.scheduler.scale_model_input(latents_input , t )
            # predict the noise residual
            noise_pred = self.unet(latents_input , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_kwargs ).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents ).sample
        image = torch.clamp(image , -1.0 , 1.0 )
        # [-1, 1] -> [0, 1], then NCHW -> NHWC numpy for PIL conversion.
        image = image / 2 + 0.5
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
| 84 | 0 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
    """Composite processor wrapping an auto image processor and an auto tokenizer.

    NOTE(review): the obfuscated original bound all three class attributes (and all
    three non-dunder methods) to single mangled names, and gave ``__init__``/``__call__``
    duplicate parameter names (a SyntaxError); restored to the conventional
    ProcessorMixin layout — confirm against the upstream processor this came from.
    """

    # Names ProcessorMixin uses to save/load the two sub-processors.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__( self , image_processor , tokenizer ):
        """Register both sub-processors with ProcessorMixin."""
        super().__init__(image_processor , tokenizer )
        # Default active processor (the original bound this to a throwaway local) —
        # presumably mirrors the ``current_processor`` convention; confirm upstream.
        self.current_processor = self.image_processor

    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        """Tokenize ``text`` and/or featurize ``images``.

        Returns the text encoding (with ``pixel_values`` attached when images are
        also given), or a BatchEncoding of the image features alone.

        Raises:
            ValueError: if neither ``text`` nor ``images`` is provided.
        """
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none." )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            # Merge the image features into the text encoding.
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )

    def batch_decode( self , *args , **kwargs ):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        """Inputs the paired model expects from this processor."""
        return ["input_ids", "attention_mask", "pixel_values"]
| 85 |
"""simple docstring"""
a : List[Any] = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"""
def lowercase__(A ) ->bytes:
    """Encode a bytes object to base64 (RFC 4648).

    The obfuscated original called ``isinstance(A, A)``, iterated ``bin(A)`` instead
    of ``bin(byte)``, and referenced an undefined module constant; the alphabet is
    now a local constant so the function is self-contained.

    Args:
        A: the raw ``bytes`` payload to encode.

    Returns:
        Base64-encoded ASCII ``bytes``, including ``=`` padding.

    Raises:
        TypeError: if ``A`` is not a ``bytes`` object.

    >>> lowercase__(b"Hello World!")
    b'SGVsbG8gV29ybGQh'
    """
    data = A
    charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
    if not isinstance(data, bytes):
        message = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(message)
    # One long bit string, 8 bits per input byte.
    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)
    padding_needed = len(binary_stream) % 6 != 0
    if padding_needed:
        # The padding that will be appended to the encoded output.
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)
        # Zero-fill so the stream splits evenly into 6-bit groups.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""
    # Map every 6-bit group to its base64 character.
    return (
        "".join(
            charset[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )
def lowercase__(A ) ->bytes:
    """Decode base64-encoded data (``bytes`` or ASCII ``str``) back to raw bytes.

    The obfuscated original called ``isinstance(A, A)`` and indexed the alphabet with
    the whole argument instead of each character; the alphabet is now a local
    constant so the function is self-contained.

    Args:
        A: base64 text as ``bytes`` (ASCII) or ``str``.

    Returns:
        The decoded ``bytes``.

    Raises:
        TypeError: if ``A`` is neither ``bytes`` nor ``str``.
        ValueError: if ``A`` is ``bytes`` but not valid ASCII/UTF-8.
        AssertionError: on invalid base64 characters or incorrect padding.

    >>> lowercase__(b"SGVsbG8gV29ybGQh")
    b'Hello World!'
    """
    encoded_data = A
    charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        message = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(message)
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")
    padding = encoded_data.count("=")
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in charset for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in charset for char in encoded_data
        ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding, and drop the filler bits it stood for (2 bits per '=').
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(charset.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(charset.index(char))[2:].zfill(6) for char in encoded_data
        )
    # Reassemble bytes from each 8-bit group.
    return bytes(
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 85 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.