import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name):
    """Map a key from the original MAE checkpoint to its transformers ViTMAE equivalent."""
    if "cls_token" in name:
        name = name.replace("cls_token", "vit.embeddings.cls_token")
    if "mask_token" in name:
        name = name.replace("mask_token", "decoder.mask_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "vit.embeddings.norm")
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight", "vit.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias", "vit.layernorm.bias")

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # Split the fused qkv projection into separate query/key/value weights. The
            # target key names below follow the transformers ViT parameter layout.
            key_split = key.split(".")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
            if "weight" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    image_processor = ViTMAEImageProcessor(size=config.image_size)

    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]]
        )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]]
        )
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]]
        )

    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
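# Illustrative usage (not part of the original script; the output folder name is an
# assumption). After conversion, the dump directory can be reloaded with the standard
# `from_pretrained` API:
#
#   python convert_vit_mae_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth \
#       --pytorch_dump_folder_path ./vit-mae-base
#
#   from transformers import ViTMAEForPreTraining, ViTMAEImageProcessor
#   model = ViTMAEForPreTraining.from_pretrained("./vit-mae-base")
#   image_processor = ViTMAEImageProcessor.from_pretrained("./vit-mae-base")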
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    """Streams text examples and packs them into fixed-length token sequences."""

    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        # Number of raw characters to buffer before tokenizing a batch of texts.
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)


def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))

        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()


# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
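# Standalone sketch (not part of the original script) of how ConstantLengthDataset packs
# documents. The stub tokenizer below is an assumption that maps each character to its
# ordinal, so the buffering/chunking logic can be observed without downloading a model.
class _StubTokenizer:
    bos_token_id = 0

    def __call__(self, texts, truncation=False):
        # One id per character; a real tokenizer would return subword ids instead.
        return {"input_ids": [[ord(c) for c in text] for text in texts]}


def _demo_constant_length_dataset():
    docs = [{"content": "def f():\n    return 1\n"}, {"content": "print('hi')\n"}]
    ds = ConstantLengthDataset(_StubTokenizer(), docs, seq_length=8, num_of_sequences=1, chars_per_token=1.0)
    for chunk in ds:
        print(chunk.shape)  # torch.Size([8]) for every emitted chunk; leftover tokens are dropped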
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that will dynamically pad the inputs for multiple choice received.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
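# Standalone sketch (not part of the original script) of the flatten/un-flatten trick the
# collator relies on: `batch_size * num_choices` sequences are padded as one flat batch,
# then viewed back into a (batch_size, num_choices, seq_len) tensor. All sizes are made up.
def _demo_multiple_choice_reshape():
    batch_size, num_choices, seq_len = 2, 4, 5
    flat = torch.ones(batch_size * num_choices, seq_len, dtype=torch.long)  # as after tokenizer.pad
    unflat = flat.view(batch_size, num_choices, -1)
    assert unflat.shape == (batch_size, num_choices, seq_len)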
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).

    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).

    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag",
            "regular",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer

    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
                f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    # Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}

    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics

        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }

    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
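# Illustrative invocation (hyperparameters and output path are assumptions, not part of
# this file):
#
#   python run_swag.py \
#       --model_name_or_path bert-base-uncased \
#       --do_train --do_eval \
#       --per_device_train_batch_size 16 \
#       --learning_rate 5e-5 \
#       --num_train_epochs 3 \
#       --output_dir /tmp/swag_output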
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
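# Short usage sketch (not part of the original module): with the defaults above, the
# per-stack depths end up in the `num_hidden_layers` mapping built in `__init__`.
#
#   from transformers import LxmertConfig
#   config = LxmertConfig()
#   print(config.num_hidden_layers)  # {'vision': 5, 'cross_encoder': 5, 'language': 9}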
import json
import os
import unittest

from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
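# Informal walkthrough of the fixture above: "readapt" is split into characters, then the
# merges apply in priority order ("a p" -> "ap", "ap t</w>" -> "apt</w>", "r e" -> "re",
# "a d" -> "ad", "ad apt</w>" -> "adapt</w>"), leaving "re adapt", which is emitted as
# "re@@ adapt" because "re" does not end the word. A hypothetical extra check (not in the
# original suite) along the same lines:
#
#   def test_unknown_token(self):
#       tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
#       self.assertEqual(tokenizer.convert_tokens_to_ids(["xyz"]), [6])  # unknown -> <unk>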
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    # A tuple padding value (e.g. (-1, -1)) signals 2-dimensional entries such as entity spans.
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        # The exact slice targets were lost in this copy; they are reconstructed here so that
        # "right" keeps values at the start and pads the tail, and "left" does the opposite.
        length = min(len(tensor), sequence_length)
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, :length, :] = tensor[:sequence_length]
            else:
                out_tensor[i, :length] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, -length:, :] = tensor[:sequence_length]
            else:
                out_tensor[i, -length:] = tensor[:sequence_length]

    return out_tensor.tolist()
def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    """
    Data collator that dynamically pads the inputs received, as well as the labels.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels as they are not of the same length yet.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
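# Standalone sketch (not part of the original module), consistent with the reconstruction
# of `padding_tensor` above: scalar labels pad with -1 on the right, span tuples pad
# with (-1, -1).
def _demo_padding_tensor():
    ner_tags = [[1, 2], [3]]
    assert padding_tensor(ner_tags, -1, "right", 4) == [[1, 2, -1, -1], [3, -1, -1, -1]]

    spans = [[(0, 3)], [(0, 2), (4, 7)]]
    assert padding_tensor(spans, (-1, -1), "right", 3) == [
        [[0, 3], [-1, -1], [-1, -1]],
        [[0, 2], [4, 7], [-1, -1]],
    ]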
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_v1 import MobileNetV1Config


logger = logging.get_logger(__name__)


# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"


MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """
    A map of modules from TF to PyTorch. The TF variable-name suffixes ("weights",
    "BatchNorm/beta", etc.) follow the original TF-Slim MobileNetV1 checkpoints.
    """

    tf_to_pt_map = {}

    if isinstance(model, MobileNetV1ForImageClassification):
        backbone = model.mobilenet_v1
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetV1ForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_v1(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints in a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """
    Apply TensorFlow-style "SAME" padding to a convolution layer: the padding amount
    depends on the input size, and the split between the two sides can be asymmetric.
    """
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
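# Worked check (not from the original file) of the "SAME" padding arithmetic above: for a
# 224x224 input and a 3x3 convolution with stride 2, 224 % 2 == 0, so the pad amount is
# max(3 - 2, 0) = 1 per axis, split as (left, right, top, bottom) = (0, 1, 0, 1). That
# asymmetric split is exactly what separates TF "SAME" from PyTorch's symmetric padding.
def _demo_tf_same_padding():
    conv = nn.Conv2d(3, 8, kernel_size=3, stride=2)
    features = torch.randn(1, 3, 224, 224)
    padded = apply_tf_padding(features, conv)
    assert padded.shape == (1, 3, 225, 225)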
class MobileNetV1ConvLayer(nn.Module):
    def __init__(
        self,
        config,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        groups=1,
        bias=False,
        use_normalization=True,
        use_activation=True,
    ):
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features):
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features


class MobileNetV1PreTrainedModel(PreTrainedModel):
    config_class = MobileNetV1Config
    load_tf_weights = load_tf_weights_in_mobilenet_v1
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module):
        """Initialize the weights."""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MOBILENET_V1_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileNetV1ImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1Model(MobileNetV1PreTrainedModel):
    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetV1ConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            # Each block is a depthwise 3x3 convolution followed by a pointwise 1x1 convolution.
            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )

            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values=None, output_hidden_states=None, return_dict=None):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1ForImageClassification(MobileNetV1PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_v1 = MobileNetV1Model(config)

        last_hidden_size = self.mobilenet_v1.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(self, pixel_values=None, output_hidden_states=None, labels=None, return_dict=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_v1(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
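# Typical inference sketch (checkpoint name as documented above; the `image` variable is
# an assumption, e.g. a PIL image loaded by the caller):
#
#   from transformers import AutoImageProcessor, MobileNetV1ForImageClassification
#   image_processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
#   model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
#   inputs = image_processor(images=image, return_tensors="pt")
#   logits = model(**inputs).logits
#   print(model.config.id2label[logits.argmax(-1).item()])  # e.g. "tabby, tabby cat"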
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
    """You can also import this e.g. from .test_modeling_funnel import FunnelModelTester"""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        block_sizes=[1, 1, 2],
        num_decoder_layers=1,
        d_model=32,
        n_head=4,
        d_head=8,
        d_inner=37,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        max_position_embeddings=512,
        type_vocab_size=3,
        initializer_std=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        base=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std

        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FunnelConfig(
            vocab_size=self.vocab_size,
            block_sizes=self.block_sizes,
            num_decoder_layers=self.num_decoder_layers,
            d_model=self.d_model,
            n_head=self.n_head,
            d_head=self.d_head,
            d_inner=self.d_inner,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            activation_dropout=self.activation_dropout,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_std=self.initializer_std,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.truncate_seq = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.separate_cls = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

    def create_and_check_base_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelBaseModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))

        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase__ = (
{
"""feature-extraction""": (TFFunnelBaseModel, TFFunnelModel),
"""fill-mask""": TFFunnelForMaskedLM,
"""question-answering""": TFFunnelForQuestionAnswering,
"""text-classification""": TFFunnelForSequenceClassification,
"""token-classification""": TFFunnelForTokenClassification,
"""zero-shot""": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = TFFunnelModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase )
@require_tf
class __lowerCAmelCase ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = TFFunnelModelTester(self , base=__UpperCAmelCase )
__lowerCamelCase = ConfigTester(self , config_class=__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase )
| 622 | 0 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
a_ = 16
a_ = 32
def a__ ( _UpperCamelCase : Accelerator ,_UpperCamelCase : int = 16 ):
__lowerCamelCase = AutoTokenizer.from_pretrained('''bert-base-cased''' )
__lowerCamelCase = load_dataset('''glue''' ,'''mrpc''' )
def tokenize_function(_UpperCamelCase : int ):
# max_length=None => use the model max length (it's actually the default)
__lowerCamelCase = tokenizer(examples['''sentence1'''] ,examples['''sentence2'''] ,truncation=_UpperCamelCase ,max_length=_UpperCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__lowerCamelCase = datasets.map(
_UpperCamelCase ,batched=_UpperCamelCase ,remove_columns=['''idx''', '''sentence1''', '''sentence2'''] ,)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__lowerCamelCase = tokenized_datasets.rename_column('''label''' ,'''labels''' )
def collate_fn(_UpperCamelCase : Optional[int] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__lowerCamelCase = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want sequence lengths that are round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__lowerCamelCase = 16
elif accelerator.mixed_precision != "no":
__lowerCamelCase = 8
else:
__lowerCamelCase = None
return tokenizer.pad(
_UpperCamelCase ,padding='''longest''' ,max_length=_UpperCamelCase ,pad_to_multiple_of=_UpperCamelCase ,return_tensors='''pt''' ,)
# Instantiate dataloaders.
__lowerCamelCase = DataLoader(
tokenized_datasets['''train'''] ,shuffle=_UpperCamelCase ,collate_fn=_UpperCamelCase ,batch_size=_UpperCamelCase )
__lowerCamelCase = DataLoader(
tokenized_datasets['''validation'''] ,shuffle=_UpperCamelCase ,collate_fn=_UpperCamelCase ,batch_size=_UpperCamelCase )
return train_dataloader, eval_dataloader
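# ---------------------------------------------------------------------------
# Illustration (not part of the original script): a minimal sketch of what
# `pad_to_multiple_of` buys us in the collate_fn above, assuming a standard
# Hugging Face tokenizer. The helper below is hypothetical and never called.
def _pad_to_multiple_of_demo():
    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("bert-base-cased")
    # The longest sequence has 13 ids; with pad_to_multiple_of=8 the batch is
    # padded to 16, which keeps tensor shapes friendly to fp16/bf16 kernels.
    batch = tok.pad(
        [{"input_ids": [101, 7592, 102]}, {"input_ids": list(range(13))}],
        padding="longest",
        pad_to_multiple_of=8,
        return_tensors="pt",
    )
    assert batch["input_ids"].shape[-1] == 16
# ---------------------------------------------------------------------------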
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
a_ = mocked_dataloaders # noqa: F811
def a__ ( _UpperCamelCase : List[str] ,_UpperCamelCase : Tuple ):
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' ,_UpperCamelCase ) == "1":
__lowerCamelCase = 2
# New Code #
__lowerCamelCase = int(args.gradient_accumulation_steps )
# Initialize accelerator
__lowerCamelCase = Accelerator(
cpu=args.cpu ,mixed_precision=args.mixed_precision ,gradient_accumulation_steps=_UpperCamelCase )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
'''Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`''' )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__lowerCamelCase = config['''lr''']
__lowerCamelCase = int(config['''num_epochs'''] )
__lowerCamelCase = int(config['''seed'''] )
__lowerCamelCase = int(config['''batch_size'''] )
__lowerCamelCase = evaluate.load('''glue''' ,'''mrpc''' )
set_seed(_UpperCamelCase )
__lowerCamelCase ,__lowerCamelCase = get_dataloaders(_UpperCamelCase ,_UpperCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__lowerCamelCase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' ,return_dict=_UpperCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__lowerCamelCase = model.to(accelerator.device )
# Instantiate optimizer
__lowerCamelCase = AdamW(params=model.parameters() ,lr=_UpperCamelCase )
# Instantiate scheduler
__lowerCamelCase = get_linear_schedule_with_warmup(
optimizer=_UpperCamelCase ,num_warmup_steps=1_00 ,num_training_steps=(len(_UpperCamelCase ) * num_epochs) ,)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = accelerator.prepare(
_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
# Now we train the model
for epoch in range(_UpperCamelCase ):
model.train()
for step, batch in enumerate(_UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
            # Gradient accumulation is currently unsupported (and not advised) on TPUs, as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(_UpperCamelCase ):
__lowerCamelCase = model(**_UpperCamelCase )
__lowerCamelCase = output.loss
accelerator.backward(_UpperCamelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__lowerCamelCase = model(**_UpperCamelCase )
__lowerCamelCase = outputs.logits.argmax(dim=-1 )
__lowerCamelCase ,__lowerCamelCase = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=_UpperCamelCase ,references=_UpperCamelCase ,)
__lowerCamelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" ,_UpperCamelCase )
def a__ ( ):
__lowerCamelCase = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
        '''--mixed_precision''' ,type=_UpperCamelCase ,default=_UpperCamelCase ,choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] ,help='''Whether to use mixed precision. Choose '''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '''
        '''and an Nvidia Ampere GPU.''' ,)
# New Code #
parser.add_argument(
        '''--gradient_accumulation_steps''' ,type=_UpperCamelCase ,default=1 ,help='''The number of minibatches to be run before gradients are accumulated.''' ,)
parser.add_argument('''--cpu''' ,action='''store_true''' ,help='''If passed, will train on the CPU.''' )
__lowerCamelCase = parser.parse_args()
__lowerCamelCase = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(_UpperCamelCase ,_UpperCamelCase )
if __name__ == "__main__":
main()
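# ---------------------------------------------------------------------------
# Illustration (not part of the original script): roughly what
# `accelerator.accumulate(model)` does for us above, written as a hedged,
# plain-PyTorch sketch. The loss is scaled by `accum_steps` so the accumulated
# gradient is an average, and the optimizer only steps every `accum_steps`
# micro-batches. All names below are placeholders, not Accelerate internals.
def _manual_gradient_accumulation(model, loader, optimizer, accum_steps=2):
    model.train()
    for step, batch in enumerate(loader):
        loss = model(**batch).loss / accum_steps  # average over micro-batches
        loss.backward()  # gradients accumulate in .grad across iterations
        if (step + 1) % accum_steps == 0:
            optimizer.step()
            optimizer.zero_grad()
# ---------------------------------------------------------------------------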
| 717 |
from collections import namedtuple
import requests
from lxml import html # type: ignore
a_ = namedtuple("""covid_data""", """cases deaths recovered""")
def a__ ( _UpperCamelCase : str = "https://www.worldometers.info/coronavirus/" ):
__lowerCamelCase = '''//div[@class = "maincounter-number"]/span/text()'''
return covid_data(*html.fromstring(requests.get(_UpperCamelCase ).content ).xpath(_UpperCamelCase ) )
a_ = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
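# Illustration (not part of the original snippet): the XPath above selects the
# three "maincounter-number" spans in page order (cases, deaths, recovered),
# and `covid_data(*...)` unpacks them positionally into the namedtuple, so the
# result supports attribute access, e.g.:
#   stats = covid_stats()
#   stats.cases, stats.deaths, stats.recovered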
| 622 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
a_ = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.linear_k""": """encoder.layers.*.self_attn.linear_k""",
"""self_attn.linear_v""": """encoder.layers.*.self_attn.linear_v""",
"""self_attn.linear_q""": """encoder.layers.*.self_attn.linear_q""",
"""self_attn.pos_bias_u""": """encoder.layers.*.self_attn.pos_bias_u""",
"""self_attn.pos_bias_v""": """encoder.layers.*.self_attn.pos_bias_v""",
"""self_attn.linear_out""": """encoder.layers.*.self_attn.linear_out""",
"""self_attn.linear_pos""": """encoder.layers.*.self_attn.linear_pos""",
"""self_attn.rotary_emb""": """encoder.embed_positions""",
"""self_attn_layer_norm""": """encoder.layers.*.self_attn_layer_norm""",
"""conv_module.pointwise_conv1""": """encoder.layers.*.conv_module.pointwise_conv1""",
"""conv_module.pointwise_conv2""": """encoder.layers.*.conv_module.pointwise_conv2""",
"""conv_module.depthwise_conv""": """encoder.layers.*.conv_module.depthwise_conv""",
"""conv_module.batch_norm""": """encoder.layers.*.conv_module.batch_norm""",
"""conv_module.layer_norm""": """encoder.layers.*.conv_module.layer_norm""",
"""ffn1.w_1""": """encoder.layers.*.ffn1.intermediate_dense""",
"""ffn1.w_2""": """encoder.layers.*.ffn1.output_dense""",
"""ffn1.layer_norm""": """encoder.layers.*.ffn1_layer_norm""",
"""ffn2.w_1""": """encoder.layers.*.ffn2.intermediate_dense""",
"""ffn2.w_2""": """encoder.layers.*.ffn2.output_dense""",
"""ffn2.layer_norm""": """encoder.layers.*.ffn2_layer_norm""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
a_ = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def a__ ( _UpperCamelCase : Any ,_UpperCamelCase : int ,_UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Tuple ):
for attribute in key.split('''.''' ):
__lowerCamelCase = getattr(_UpperCamelCase ,_UpperCamelCase )
if weight_type is not None:
__lowerCamelCase = getattr(_UpperCamelCase ,_UpperCamelCase ).shape
else:
__lowerCamelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__lowerCamelCase = value
elif weight_type == "weight_g":
__lowerCamelCase = value
elif weight_type == "weight_v":
__lowerCamelCase = value
elif weight_type == "bias":
__lowerCamelCase = value
elif weight_type == "running_mean":
__lowerCamelCase = value
elif weight_type == "running_var":
__lowerCamelCase = value
elif weight_type == "num_batches_tracked":
__lowerCamelCase = value
elif weight_type == "inv_freq":
__lowerCamelCase = value
else:
__lowerCamelCase = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def a__ ( _UpperCamelCase : Dict ,_UpperCamelCase : Union[str, Any] ,_UpperCamelCase : List[Any] ):
__lowerCamelCase = []
__lowerCamelCase = fairseq_model.state_dict()
__lowerCamelCase = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
__lowerCamelCase = False
if "conv_layers" in name:
load_conv_layer(
_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,hf_model.config.feat_extract_norm == '''group''' ,)
__lowerCamelCase = True
else:
for key, mapped_key in MAPPING.items():
__lowerCamelCase = '''wav2vec2_conformer.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
__lowerCamelCase = True
if "*" in mapped_key:
__lowerCamelCase = name.split(_UpperCamelCase )[0].split('''.''' )[-2]
__lowerCamelCase = mapped_key.replace('''*''' ,_UpperCamelCase )
if "pos_bias_u" in name:
__lowerCamelCase = None
elif "pos_bias_v" in name:
__lowerCamelCase = None
elif "weight_g" in name:
__lowerCamelCase = '''weight_g'''
elif "weight_v" in name:
__lowerCamelCase = '''weight_v'''
elif "bias" in name:
__lowerCamelCase = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__lowerCamelCase = '''weight'''
elif "running_mean" in name:
__lowerCamelCase = '''running_mean'''
elif "inv_freq" in name:
__lowerCamelCase = '''inv_freq'''
elif "running_var" in name:
__lowerCamelCase = '''running_var'''
elif "num_batches_tracked" in name:
__lowerCamelCase = '''num_batches_tracked'''
else:
__lowerCamelCase = None
set_recursively(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
continue
if not is_used:
unused_weights.append(_UpperCamelCase )
logger.warning(F"""Unused weights: {unused_weights}""" )
def a__ ( _UpperCamelCase : Optional[Any] ,_UpperCamelCase : Optional[int] ,_UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Optional[int] ,_UpperCamelCase : Union[str, Any] ):
__lowerCamelCase = full_name.split('''conv_layers.''' )[-1]
__lowerCamelCase = name.split('''.''' )
__lowerCamelCase = int(items[0] )
__lowerCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_UpperCamelCase )
@torch.no_grad()
def a__ ( _UpperCamelCase : Dict ,_UpperCamelCase : Dict ,_UpperCamelCase : Any=None ,_UpperCamelCase : int=None ,_UpperCamelCase : Dict=True ):
if config_path is not None:
__lowerCamelCase = WavaVecaConformerConfig.from_pretrained(_UpperCamelCase ,hidden_act='''swish''' )
else:
__lowerCamelCase = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
__lowerCamelCase = '''rotary'''
if is_finetuned:
if dict_path:
__lowerCamelCase = Dictionary.load(_UpperCamelCase )
            # Important: change the bos & pad token ids, since the CTC symbol is <pad>
            # and not <s> as in fairseq
__lowerCamelCase = target_dict.pad_index
__lowerCamelCase = target_dict.bos_index
__lowerCamelCase = target_dict.eos_index
__lowerCamelCase = len(target_dict.symbols )
__lowerCamelCase = os.path.join(_UpperCamelCase ,'''vocab.json''' )
if not os.path.isdir(_UpperCamelCase ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_UpperCamelCase ) )
return
os.makedirs(_UpperCamelCase ,exist_ok=_UpperCamelCase )
__lowerCamelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
__lowerCamelCase = 0
__lowerCamelCase = 1
with open(_UpperCamelCase ,'''w''' ,encoding='''utf-8''' ) as vocab_handle:
json.dump(_UpperCamelCase ,_UpperCamelCase )
__lowerCamelCase = WavaVecaCTCTokenizer(
_UpperCamelCase ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token='''|''' ,do_lower_case=_UpperCamelCase ,)
__lowerCamelCase = True if config.feat_extract_norm == '''layer''' else False
__lowerCamelCase = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=1_60_00 ,padding_value=0 ,do_normalize=_UpperCamelCase ,return_attention_mask=_UpperCamelCase ,)
__lowerCamelCase = WavaVecaProcessor(feature_extractor=_UpperCamelCase ,tokenizer=_UpperCamelCase )
processor.save_pretrained(_UpperCamelCase )
__lowerCamelCase = WavaVecaConformerForCTC(_UpperCamelCase )
else:
__lowerCamelCase = WavaVecaConformerForPreTraining(_UpperCamelCase )
if is_finetuned:
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
__lowerCamelCase = argparse.Namespace(task='''audio_pretraining''' )
__lowerCamelCase = fairseq.tasks.setup_task(_UpperCamelCase )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ,task=_UpperCamelCase )
__lowerCamelCase = model[0].eval()
recursively_load_weights(_UpperCamelCase ,_UpperCamelCase ,not is_finetuned )
hf_wavavec.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
a_ = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
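# ---------------------------------------------------------------------------
# Illustration (not part of the original script): a minimal sketch of the "*"
# wildcard substitution used in `recursively_load_weights` above. A fairseq key
# such as "encoder.layers.3.self_attn.linear_q.weight" is matched against a
# MAPPING entry containing "*", and the "*" is replaced with the layer index
# recovered from the original key.
def _expand_wildcard_key(mapped_key: str, fairseq_name: str, matched_key: str) -> str:
    layer_index = fairseq_name.split(matched_key)[0].split(".")[-2]
    return mapped_key.replace("*", layer_index)

assert _expand_wildcard_key(
    "encoder.layers.*.self_attn.linear_q",
    "encoder.layers.3.self_attn.linear_q.weight",
    "self_attn.linear_q",
) == "encoder.layers.3.self_attn.linear_q"
# ---------------------------------------------------------------------------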
| 718 |
def a__ ( _UpperCamelCase : str ,_UpperCamelCase : str = " " ):
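    """
    Split ``string`` on every occurrence of ``separator``.

    Illustrative doctests (added; exercised by the ``testmod()`` call below):

    >>> a__("apple#banana#cherry#orange", "#")
    ['apple', 'banana', 'cherry', 'orange']
    >>> a__("Hello there")
    ['Hello', 'there']
    """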
__lowerCamelCase = []
__lowerCamelCase = 0
for index, char in enumerate(_UpperCamelCase ):
if char == separator:
split_words.append(string[last_index:index] )
__lowerCamelCase = index + 1
elif index + 1 == len(_UpperCamelCase ):
split_words.append(string[last_index : index + 1] )
return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
| 622 | 0 |
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
a_ = logging.get_logger(__name__)
def a__ ( _UpperCamelCase : Tuple ,_UpperCamelCase : Dict ,_UpperCamelCase : Optional[Any] ):
return [
int(10_00 * (box[0] / width) ),
int(10_00 * (box[1] / height) ),
int(10_00 * (box[2] / width) ),
int(10_00 * (box[3] / height) ),
]
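# Illustration (not part of the original file): the helper above (called as
# `normalize_box(box, width, height)` later in this file) rescales a pixel box
# into the 0-1000 coordinate system LayoutLM-style models expect. For example,
# on a 200x100 (width x height) image:
#   normalize_box([10, 20, 30, 40], 200, 100)  ->  [50, 200, 150, 400]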
def a__ ( _UpperCamelCase : np.ndarray ,_UpperCamelCase : Optional[str] ,_UpperCamelCase : Optional[str] = None ):
__lowerCamelCase = tesseract_config if tesseract_config is not None else ''''''
# apply OCR
__lowerCamelCase = to_pil_image(_UpperCamelCase )
__lowerCamelCase ,__lowerCamelCase = pil_image.size
__lowerCamelCase = pytesseract.image_to_data(_UpperCamelCase ,lang=_UpperCamelCase ,output_type='''dict''' ,config=_UpperCamelCase )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
# filter empty words and corresponding coordinates
__lowerCamelCase = [idx for idx, word in enumerate(_UpperCamelCase ) if not word.strip()]
__lowerCamelCase = [word for idx, word in enumerate(_UpperCamelCase ) if idx not in irrelevant_indices]
__lowerCamelCase = [coord for idx, coord in enumerate(_UpperCamelCase ) if idx not in irrelevant_indices]
__lowerCamelCase = [coord for idx, coord in enumerate(_UpperCamelCase ) if idx not in irrelevant_indices]
__lowerCamelCase = [coord for idx, coord in enumerate(_UpperCamelCase ) if idx not in irrelevant_indices]
__lowerCamelCase = [coord for idx, coord in enumerate(_UpperCamelCase ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
__lowerCamelCase = []
for x, y, w, h in zip(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ):
__lowerCamelCase = [x, y, x + w, y + h]
actual_boxes.append(_UpperCamelCase )
# finally, normalize the bounding boxes
__lowerCamelCase = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ) )
assert len(_UpperCamelCase ) == len(_UpperCamelCase ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = ["""pixel_values"""]
def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BILINEAR , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = "" , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
__lowerCamelCase = size if size is not None else {'''height''': 224, '''width''': 224}
__lowerCamelCase = get_size_dict(__UpperCAmelCase )
__lowerCamelCase = do_resize
__lowerCamelCase = size
__lowerCamelCase = resample
__lowerCamelCase = apply_ocr
__lowerCamelCase = ocr_lang
__lowerCamelCase = tesseract_config
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = PILImageResampling.BILINEAR , __UpperCAmelCase = None , **__UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = get_size_dict(__UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
__lowerCamelCase = (size['''height'''], size['''width'''])
return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = do_resize if do_resize is not None else self.do_resize
__lowerCamelCase = size if size is not None else self.size
__lowerCamelCase = get_size_dict(__UpperCAmelCase )
__lowerCamelCase = resample if resample is not None else self.resample
__lowerCamelCase = apply_ocr if apply_ocr is not None else self.apply_ocr
__lowerCamelCase = ocr_lang if ocr_lang is not None else self.ocr_lang
__lowerCamelCase = tesseract_config if tesseract_config is not None else self.tesseract_config
__lowerCamelCase = make_list_of_images(__UpperCAmelCase )
if not valid_images(__UpperCAmelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
# All transformations expect numpy arrays.
__lowerCamelCase = [to_numpy_array(__UpperCAmelCase ) for image in images]
if apply_ocr:
requires_backends(self , '''pytesseract''' )
__lowerCamelCase = []
__lowerCamelCase = []
for image in images:
__lowerCamelCase ,__lowerCamelCase = apply_tesseract(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
words_batch.append(__UpperCAmelCase )
boxes_batch.append(__UpperCAmelCase )
if do_resize:
__lowerCamelCase = [self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
__lowerCamelCase = [flip_channel_order(__UpperCAmelCase ) for image in images]
__lowerCamelCase = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images]
__lowerCamelCase = BatchFeature(data={'''pixel_values''': images} , tensor_type=__UpperCAmelCase )
if apply_ocr:
__lowerCamelCase = words_batch
__lowerCamelCase = boxes_batch
return data
| 719 |
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class __lowerCAmelCase ( lowerCAmelCase__ ):
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = tempfile.mkdtemp()
__lowerCamelCase = 8
# DPR tok
__lowerCamelCase = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__lowerCamelCase = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
__lowerCamelCase = os.path.join(__UpperCAmelCase , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
__lowerCamelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
__lowerCamelCase = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
__lowerCamelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__lowerCamelCase = {'''unk_token''': '''<unk>'''}
__lowerCamelCase = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
__lowerCamelCase = os.path.join(__UpperCAmelCase , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
__lowerCamelCase = os.path.join(__UpperCAmelCase , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__UpperCAmelCase ) )
def lowerCamelCase ( self ):
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def lowerCamelCase ( self ):
'''simple docstring'''
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def lowerCamelCase ( self ):
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def lowerCamelCase ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_dummy_dataset()
__lowerCamelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
__lowerCamelCase = dataset
__lowerCamelCase = RagRetriever(
__UpperCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.get_dummy_dataset()
__lowerCamelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
if from_disk:
__lowerCamelCase = os.path.join(self.tmpdirname , '''dataset''' )
__lowerCamelCase = os.path.join(self.tmpdirname , '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
del dataset
__lowerCamelCase = RagRetriever(
__UpperCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
__lowerCamelCase = RagRetriever(
__UpperCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , __UpperCAmelCase ) , )
return retriever
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
__lowerCamelCase = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
__lowerCamelCase = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
__lowerCamelCase = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(__UpperCAmelCase , open(__UpperCAmelCase , '''wb''' ) )
__lowerCamelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
__lowerCamelCase = RagRetriever(
__UpperCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = 1
__lowerCamelCase = self.get_dummy_canonical_hf_index_retriever()
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=__UpperCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__UpperCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __UpperCAmelCase )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
__lowerCamelCase = self.get_dummy_dataset()
retriever.save_pretrained(__UpperCAmelCase )
__lowerCamelCase = RagRetriever.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = 1
__lowerCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase )
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=__UpperCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__UpperCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __UpperCAmelCase )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__UpperCAmelCase )
__lowerCamelCase = RagRetriever.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = 1
__lowerCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase )
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=__UpperCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__UpperCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __UpperCAmelCase )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__UpperCAmelCase )
__lowerCamelCase = RagRetriever.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = 1
__lowerCamelCase = self.get_dummy_legacy_index_retriever()
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=__UpperCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__UpperCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , __UpperCAmelCase )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__UpperCAmelCase )
__lowerCamelCase = RagRetriever.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def lowerCamelCase ( self ):
'''simple docstring'''
import torch
__lowerCamelCase = 1
__lowerCamelCase = self.get_dummy_canonical_hf_index_retriever()
__lowerCamelCase = [[5, 7], [10, 11]]
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase = retriever(__UpperCAmelCase , __UpperCAmelCase , prefix=retriever.config.generator.prefix , n_docs=__UpperCAmelCase )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = (
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , np.ndarray )
__lowerCamelCase = retriever(
__UpperCAmelCase , __UpperCAmelCase , prefix=retriever.config.generator.prefix , n_docs=__UpperCAmelCase , return_tensors='''pt''' , )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = ( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_dpr_ctx_encoder_tokenizer()
__lowerCamelCase = 1
__lowerCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase )
retriever.set_ctx_encoder_tokenizer(__UpperCAmelCase )
__lowerCamelCase = [[5, 7], [10, 11]]
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase = retriever(__UpperCAmelCase , __UpperCAmelCase , prefix=retriever.config.generator.prefix , n_docs=__UpperCAmelCase )
self.assertEqual(
            len(__UpperCAmelCase ) , 6 )  # check whether the retriever output consists of 6 attributes, including the tokenized docs
self.assertEqual(
            all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , __UpperCAmelCase )  # check for the doc-token related keys in the dictionary.
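# ---------------------------------------------------------------------------
# Illustration (not part of the original tests): a minimal, hedged sketch of
# the FAISS-backed lookup the dummy dataset above exercises. Requires the
# `datasets` and `faiss` packages; all values are toy data and the helper is
# never called by the test suite.
def _faiss_retrieval_demo():
    import numpy as np
    import faiss
    from datasets import Dataset

    ds = Dataset.from_dict({"text": ["foo", "bar"], "embeddings": [np.ones(8), 2 * np.ones(8)]})
    ds.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
    # Inner product is maximised by the larger embedding, so "bar" is retrieved.
    scores, examples = ds.get_nearest_examples("embeddings", np.ones(8, dtype=np.float32), k=1)
    assert examples["text"] == ["bar"]
# ---------------------------------------------------------------------------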
| 622 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {
"""configuration_bigbird_pegasus""": [
"""BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BigBirdPegasusConfig""",
"""BigBirdPegasusOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"""BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BigBirdPegasusForCausalLM""",
"""BigBirdPegasusForConditionalGeneration""",
"""BigBirdPegasusForQuestionAnswering""",
"""BigBirdPegasusForSequenceClassification""",
"""BigBirdPegasusModel""",
"""BigBirdPegasusPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
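# ---------------------------------------------------------------------------
# Illustration (not part of the original file): a minimal, hedged sketch of the
# lazy-import idea behind `_LazyModule` above -- the heavy submodule import is
# deferred until an attribute is actually used, keeping the top-level package
# import cheap. The names below are placeholders, not the real implementation.
def _lazy_attribute(module_name: str, attribute: str):
    import importlib

    def _resolve():
        # The real import happens here, on first use, not at package import time.
        return getattr(importlib.import_module(module_name), attribute)

    return _resolve

# e.g. dumps = _lazy_attribute("json", "dumps")(); dumps({"a": 1}) == '{"a": 1}'
# ---------------------------------------------------------------------------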
| 720 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"""sail/poolformer_s12""": """https://huggingface.co/sail/poolformer_s12/resolve/main/config.json""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = """poolformer"""
def __init__( self , __UpperCAmelCase=3 , __UpperCAmelCase=16 , __UpperCAmelCase=16 , __UpperCAmelCase=3 , __UpperCAmelCase=4.0 , __UpperCAmelCase=[2, 2, 6, 2] , __UpperCAmelCase=[64, 128, 320, 512] , __UpperCAmelCase=[7, 3, 3, 3] , __UpperCAmelCase=[4, 2, 2, 2] , __UpperCAmelCase=[2, 1, 1, 1] , __UpperCAmelCase=4 , __UpperCAmelCase=0.0 , __UpperCAmelCase="gelu" , __UpperCAmelCase=True , __UpperCAmelCase=1E-5 , __UpperCAmelCase=0.02 , **__UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = num_channels
__lowerCamelCase = patch_size
__lowerCamelCase = stride
__lowerCamelCase = padding
__lowerCamelCase = pool_size
__lowerCamelCase = hidden_sizes
__lowerCamelCase = mlp_ratio
__lowerCamelCase = depths
__lowerCamelCase = patch_sizes
__lowerCamelCase = strides
__lowerCamelCase = num_encoder_blocks
__lowerCamelCase = drop_path_rate
__lowerCamelCase = hidden_act
__lowerCamelCase = use_layer_scale
__lowerCamelCase = layer_scale_init_value
__lowerCamelCase = initializer_range
super().__init__(**__UpperCAmelCase )
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = version.parse("""1.11""" )
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return 2E-3
| 622 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"""microsoft/cvt-13""": """https://huggingface.co/microsoft/cvt-13/resolve/main/config.json""",
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = """cvt"""
def __init__( self , __UpperCAmelCase=3 , __UpperCAmelCase=[7, 3, 3] , __UpperCAmelCase=[4, 2, 2] , __UpperCAmelCase=[2, 1, 1] , __UpperCAmelCase=[64, 192, 384] , __UpperCAmelCase=[1, 3, 6] , __UpperCAmelCase=[1, 2, 10] , __UpperCAmelCase=[4.0, 4.0, 4.0] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=[0.0, 0.0, 0.1] , __UpperCAmelCase=[True, True, True] , __UpperCAmelCase=[False, False, True] , __UpperCAmelCase=["dw_bn", "dw_bn", "dw_bn"] , __UpperCAmelCase=[3, 3, 3] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-1_2 , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
__lowerCamelCase = num_channels
__lowerCamelCase = patch_sizes
__lowerCamelCase = patch_stride
__lowerCamelCase = patch_padding
__lowerCamelCase = embed_dim
__lowerCamelCase = num_heads
__lowerCamelCase = depth
__lowerCamelCase = mlp_ratio
__lowerCamelCase = attention_drop_rate
__lowerCamelCase = drop_rate
__lowerCamelCase = drop_path_rate
__lowerCamelCase = qkv_bias
__lowerCamelCase = cls_token
__lowerCamelCase = qkv_projection_method
__lowerCamelCase = kernel_qkv
__lowerCamelCase = padding_kv
__lowerCamelCase = stride_kv
__lowerCamelCase = padding_q
__lowerCamelCase = stride_q
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
| 721 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"""uclanlp/visualbert-vqa""": """https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-pre""": """https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-vcr""": """https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-pre""": """https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-nlvr2""": """https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-pre""": """https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"""
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = """visual_bert"""
def __init__( self , __UpperCAmelCase=30522 , __UpperCAmelCase=768 , __UpperCAmelCase=512 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-1_2 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase )
__lowerCamelCase = vocab_size
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = hidden_size
__lowerCamelCase = visual_embedding_dim
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = initializer_range
__lowerCamelCase = type_vocab_size
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = bypass_transformer
__lowerCamelCase = special_visual_initialize
| 622 | 0 |
"""simple docstring"""
import operator as op
def lowerCAmelCase_( lowercase_ : Dict ) -> Dict:
_lowerCamelCase = []
    _lowerCamelCase = lambda x , y : int(x / y )  # noqa: E731 integer division operation
_lowerCamelCase = {
'''^''': op.pow,
'''*''': op.mul,
'''/''': div,
'''+''': op.add,
'''-''': op.sub,
} # operators & their respective operation
# print table header
print('''Symbol'''.center(8 ) , '''Action'''.center(12 ) , '''Stack''' , sep=''' | ''' )
print('''-''' * (30 + len(lowercase_ )) )
for x in post_fix:
        if x.isdigit():  # if x is a digit
stack.append(lowercase_ ) # append x to stack
# output in tabular format
print(x.rjust(8 ) , ('''push(''' + x + ''')''').ljust(12 ) , ''','''.join(lowercase_ ) , sep=''' | ''' )
else:
_lowerCamelCase = stack.pop() # pop stack
# output in tabular format
print(''''''.rjust(8 ) , ('''pop(''' + b + ''')''').ljust(12 ) , ''','''.join(lowercase_ ) , sep=''' | ''' )
_lowerCamelCase = stack.pop() # pop stack
# output in tabular format
print(''''''.rjust(8 ) , ('''pop(''' + a + ''')''').ljust(12 ) , ''','''.join(lowercase_ ) , sep=''' | ''' )
stack.append(
str(opr[x](int(lowercase_ ) , int(lowercase_ ) ) ) ) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8 ) , ('''push(''' + a + x + b + ''')''').ljust(12 ) , ''','''.join(lowercase_ ) , sep=''' | ''' , )
return int(stack[0] )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Union[str, Any] = input('''\n\nEnter a Postfix Equation (space separated) = ''').split(''' ''')
print('''\n\tResult = ''', solve(Postfix))
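# Illustration (not part of the original file): a worked trace for the postfix
# input "2 4 + 3 *":
#   push(2) -> [2]; push(4) -> [2, 4]; "+" pops 4 then 2 and pushes 6 -> [6];
#   push(3) -> [6, 3]; "*" pops 3 then 6 and pushes 18 -> [18]; result = 18.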
| 623 |
"""simple docstring"""
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
__SCREAMING_SNAKE_CASE : List[str] = numpy.array([0, 0])
__SCREAMING_SNAKE_CASE : Optional[Any] = numpy.array([0.5, 0.866_0254])
__SCREAMING_SNAKE_CASE : Tuple = numpy.array([1, 0])
__SCREAMING_SNAKE_CASE : List[Any] = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def lowerCAmelCase_( lowercase_ : list[numpy.ndarray] , lowercase_ : int ) -> list[numpy.ndarray]:
_lowerCamelCase = initial_vectors
for _ in range(lowercase_ ):
_lowerCamelCase = iteration_step(lowercase_ )
return vectors
def lowerCAmelCase_( lowercase_ : list[numpy.ndarray] ) -> list[numpy.ndarray]:
_lowerCamelCase = []
for i, start_vector in enumerate(vectors[:-1] ):
_lowerCamelCase = vectors[i + 1]
new_vectors.append(lowercase_ )
_lowerCamelCase = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def lowerCAmelCase_( lowercase_ : numpy.ndarray , lowercase_ : float ) -> numpy.ndarray:
_lowerCamelCase = numpy.radians(lowercase_ )
_lowerCamelCase , _lowerCamelCase = numpy.cos(lowercase_ ), numpy.sin(lowercase_ )
_lowerCamelCase = numpy.array(((c, -s), (s, c)) )
return numpy.dot(lowercase_ , lowercase_ )
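# Illustration (not part of the original file): a quick sanity check for the
# rotation matrix above -- rotating the unit x-vector by 90 degrees gives
# (0, 1) up to floating point error (the function is called `rotate` at its
# use site in `iteration_step`):
#   numpy.allclose(rotate(numpy.array([1, 0]), 90), [0, 1])  # True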
def lowerCAmelCase_( lowercase_ : list[numpy.ndarray] ) -> None:
_lowerCamelCase = plt.gca()
axes.set_aspect('''equal''' )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
_lowerCamelCase , _lowerCamelCase = zip(*lowercase_ )
plt.plot(lowercase_ , lowercase_ )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
__SCREAMING_SNAKE_CASE : str = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 623 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
__SCREAMING_SNAKE_CASE : Optional[int] = {'''configuration_speech_encoder_decoder''': ['''SpeechEncoderDecoderConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : int = ['''SpeechEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Any = ['''FlaxSpeechEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 623 |
"""simple docstring"""
from typing import Any
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ ):
_lowerCamelCase = data
_lowerCamelCase = None
class lowerCamelCase_:
'''simple docstring'''
def __init__( self ):
_lowerCamelCase = None
def snake_case__ ( self ):
_lowerCamelCase = self.head
while temp is not None:
print(temp.data , end=''' ''' )
_lowerCamelCase = temp.next
print()
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = Node(lowerCamelCase__ )
_lowerCamelCase = self.head
_lowerCamelCase = new_node
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ):
if node_data_a == node_data_a:
return
else:
_lowerCamelCase = self.head
while node_a is not None and node_a.data != node_data_a:
_lowerCamelCase = node_a.next
_lowerCamelCase = self.head
while node_a is not None and node_a.data != node_data_a:
_lowerCamelCase = node_a.next
if node_a is None or node_a is None:
return
_lowerCamelCase , _lowerCamelCase = node_a.data, node_a.data
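# Design note: swapping the `data` payloads is O(1) once both nodes are found;
# re-linking the nodes themselves would also work, but needs extra care with the
# head pointer and with adjacent nodes.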
if __name__ == "__main__":
    ll = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('''After swapping''')
ll.print_list()
| 623 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    '''Output of the KarrasVe scheduler's step functions.'''
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    '''Stochastic sampling from Karras et al. (2022) for variance-expanding diffusion models.'''
    order = 2
    @register_to_config
    def __init__(self, sigma_min: float = 0.02, sigma_max: float = 100, s_noise: float = 1.007, s_churn: float = 80, s_min: float = 0.05, s_max: float = 50, ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.num_inference_steps = None
        self.timesteps = None
        self.schedule = None  # sigma(t_i)
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)
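    # Note: each schedule entry equals sigma_max^2 * (sigma_min^2 / sigma_max^2) ** (i / (N - 1)),
    # a geometric interpolation that runs from sigma_max^2 down to sigma_min^2 over the
    # reversed timesteps.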
    def add_noise_to_input(self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat
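    # Note (Karras et al. 2022, Algorithm 2): raising the noise level to
    # sigma_hat = sigma * (1 + gamma) and adding sqrt(sigma_hat^2 - sigma^2) * eps
    # turns a valid sample at level sigma into a valid sample at level sigma_hat.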
    def step(self, model_output: torch.FloatTensor, sigma_hat: float, sigma_prev: float, sample_hat: torch.FloatTensor, return_dict: bool = True, ):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample)
    def step_correct(self, model_output: torch.FloatTensor, sigma_hat: float, sigma_prev: float, sample_hat: torch.FloatTensor, sample_prev: torch.FloatTensor, derivative: torch.FloatTensor, return_dict: bool = True, ):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample)
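    # Note: averaging the slopes at sigma_hat and sigma_prev in `step_correct` is
    # Heun's method, which is what makes this scheduler second order (`order = 2`).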
    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
| 623 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('''--make-reports''')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 623 | 1 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel
do_only_config = False
do_only_weights = True
do_only_renaming = False
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--repo_path''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    args = parser.parse_args()
    config_parameters_to_change = {
'''image_size''': '''sample_size''',
'''num_res_blocks''': '''layers_per_block''',
'''block_channels''': '''block_out_channels''',
'''down_blocks''': '''down_block_types''',
'''up_blocks''': '''up_block_types''',
'''downscale_freq_shift''': '''freq_shift''',
'''resnet_num_groups''': '''norm_num_groups''',
'''resnet_act_fn''': '''act_fn''',
'''resnet_eps''': '''norm_eps''',
'''num_head_channels''': '''attention_head_dim''',
}
    key_parameters_to_change = {
'''time_steps''': '''time_proj''',
'''mid''': '''mid_block''',
'''downsample_blocks''': '''down_blocks''',
'''upsample_blocks''': '''up_blocks''',
}
    subfolder = '''''' if has_file(args.repo_path, '''config.json''') else '''unet'''
    with open(os.path.join(args.repo_path, subfolder, '''config.json'''), '''r''', encoding='''utf-8''') as reader:
        text = reader.read()
        config = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, '''config.json'''):
        model = UNet2DModel(**config)
else:
        class_name = UNet2DConditionModel if '''ldm-text2im-large-256''' in args.repo_path else UNet2DModel
        model = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
    config = dict(model.config)
if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]
        config['''down_block_types'''] = [k.replace('''UNetRes''', '''''') for k in config['''down_block_types''']]
        config['''up_block_types'''] = [k.replace('''UNetRes''', '''''') for k in config['''up_block_types''']]
if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, '''diffusion_pytorch_model.bin'''))
        new_state_dict = {}
for param_key, param_value in state_dict.items():
if param_key.endswith('''.op.bias''') or param_key.endswith('''.op.weight'''):
continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split('''.''')[0] == key:
                    new_state_dict['''.'''.join([new_key] + param_key.split('''.''')[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 623 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline  # noqa: this module is `pipeline_kandinsky_img2img` upstream
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 623 | 1 |
"""simple docstring"""
from __future__ import annotations
__SCREAMING_SNAKE_CASE : Dict = 1.6_0_2_1e-1_9 # units = C
def lowerCAmelCase_( lowercase_ : float , lowercase_ : float , lowercase_ : float , ) -> tuple[str, float]:
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif conductivity < 0:
raise ValueError('''Conductivity cannot be negative''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative''' )
elif mobility < 0:
raise ValueError('''mobility cannot be negative''' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
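# Illustrative example: with conductivity = 0 (the unknown), electron_conc = 1e20
# and mobility = 0.01, the function returns
# ('conductivity', 0.01 * 1e20 * 1.6021e-19) == ('conductivity', ~0.16021).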
if __name__ == "__main__":
import doctest
doctest.testmod()
| 623 |
"""simple docstring"""
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    '''Builds a tiny TrOCR decoder config plus random inputs for the tests below.'''
    def __init__(self, parent, vocab_size=99, batch_size=13, d_model=16, decoder_seq_length=7, is_training=True, is_decoder=True, use_attention_mask=True, use_cache=False, use_labels=True, decoder_start_token_id=2, decoder_ffn_dim=32, decoder_layers=4, decoder_attention_heads=4, max_position_embeddings=30, pad_token_id=0, bos_token_id=1, eos_token_id=2, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        config = TrOCRConfig(
            vocab_size=self.vocab_size, d_model=self.d_model, decoder_layers=self.decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, decoder_attention_heads=self.decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, max_position_embeddings=self.max_position_embeddings, )
        return (config, input_ids, attention_mask, lm_labels)
    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels, ):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]
        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)
        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
        past_key_values = outputs['''past_key_values''']
        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1
        # append to next input_ids
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        output_from_no_past = model(next_input_ids)['''last_hidden_state''']
        output_from_past = model(next_tokens, past_key_values=past_key_values)['''last_hidden_state''']
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''Runs the common model/generation/pipeline tests on the standalone TrOCR decoder.'''
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False
    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)
    def test_inputs_embeds(self):
        pass
    def test_save_load_fast_init_from_base(self):
        pass
    def test_save_load_fast_init_to_base(self):
        pass
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)
    def test_retain_grad_hidden_states_attentions(self):
        # decoder cannot keep gradients
        return
    @unittest.skip('''The model doesn\'t support left padding''')  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
| 623 | 1 |
"""simple docstring"""
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(R'''\b(a|an|the)\b''', re.UNICODE)
OPTS = None
def parse_args():
    parser = argparse.ArgumentParser('''Official evaluation script for SQuAD version 2.0.''')
    parser.add_argument('''data_file''', metavar='''data.json''', help='''Input data JSON file.''')
    parser.add_argument('''pred_file''', metavar='''pred.json''', help='''Model predictions.''')
    parser.add_argument(
        '''--out-file''', '''-o''', metavar='''eval.json''', help='''Write accuracy metrics to file (default is stdout).''')
    parser.add_argument(
        '''--na-prob-file''', '''-n''', metavar='''na_prob.json''', help='''Model estimates of probability of no answer.''')
    parser.add_argument(
        '''--na-prob-thresh''', '''-t''', type=float, default=1.0, help='''Predict "" if no-answer probability exceeds this (default = 1.0).''', )
    parser.add_argument(
        '''--out-image-dir''', '''-p''', metavar='''out_images''', default=None, help='''Save precision-recall curves to directory.''')
    parser.add_argument('''--verbose''', '''-v''', action='''store_true''')
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa['''id''']] = bool(qa['''answers''']['''text'''])
    return qid_to_has_ans
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""
    def remove_articles(text):
        return ARTICLES_REGEX.sub(''' ''', text)
    def white_space_fix(text):
        return " ".join(text.split())
    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)
    def lower(text):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
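# Worked example: gold "the cat sat" vs. prediction "cat sat down" normalize to
# ['cat', 'sat'] and ['cat', 'sat', 'down']; num_same = 2, precision = 2/3,
# recall = 1, so F1 = 2 * (2/3 * 1) / (2/3 + 1) = 0.8.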
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa['''id''']
                gold_answers = [t for t in qa['''answers''']['''text'''] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = ['''''']
                if qid not in preds:
                    print(F"""Missing prediction for {qid}""")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ('''exact''', 100.0 * sum(exact_scores.values()) / total),
                ('''f1''', 100.0 * sum(f1_scores.values()) / total),
                ('''total''', total),
            ])
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ('''exact''', 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ('''f1''', 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ('''total''', total),
            ])
def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[F"""{prefix}_{k}"""] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color='''b''', alpha=0.2, where='''post''')
    plt.fill_between(recalls, precisions, step='''post''', alpha=0.2, color='''b''')
    plt.xlabel('''Recall''')
    plt.ylabel('''Precision''')
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, '''pr_exact.png'''), title='''Precision-Recall curve for Exact Match score''', )
    pr_f1 = make_precision_recall_eval(
        f1_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, '''pr_f1.png'''), title='''Precision-Recall curve for F1 score''', )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, '''pr_oracle.png'''), title='''Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)''', )
    merge_eval(main_eval, pr_exact, '''pr_exact''')
    merge_eval(main_eval, pr_f1, '''pr_f1''')
    merge_eval(main_eval, pr_oracle, '''pr_oracle''')
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel('''Model probability of no-answer''')
    plt.ylabel('''Proportion of dataset''')
    plt.title(F"""Histogram of no-answer probability: {name}""")
    plt.savefig(os.path.join(image_dir, F"""na_prob_hist_{name}.png"""))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
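# Note: the sweep starts from the score of predicting no-answer everywhere
# (num_no_ans) and, walking qids in order of increasing no-answer probability,
# adds each question's score delta; the best running total fixes the threshold.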
def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval['''best_exact'''] = best_exact
    main_eval['''best_exact_thresh'''] = exact_thresh
    main_eval['''best_f1'''] = best_f1
    main_eval['''best_f1_thresh'''] = f1_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json['''data''']
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, '''HasAns''')
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, '''NoAns''')
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, '''hasAns''')
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, '''noAns''')
    if OPTS.out_file:
        with open(OPTS.out_file, '''w''') as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('''Agg''')
import matplotlib.pyplot as plt
main()
| 623 |
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
logger = logging.get_logger(__name__)
class SageMakerTrainer(Trainer):
    '''Deprecated drop-in replacement for `Trainer`, kept for backwards compatibility.'''
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            '''`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '''
            '''instead.''', FutureWarning, )
        super().__init__(args=args, **kwargs)
| 623 | 1 |
"""simple docstring"""
from __future__ import annotations
def lowerCAmelCase_( lowercase_ : Any , lowercase_ : int , lowercase_ : Tuple , lowercase_ : Optional[int] ) -> Any: # noqa: E741
while r - l > 1:
_lowerCamelCase = (l + r) // 2
if v[m] >= key:
_lowerCamelCase = m
else:
_lowerCamelCase = m # noqa: E741
return r
def lowerCAmelCase_( lowercase_ : list[int] ) -> int:
if len(lowercase_ ) == 0:
return 0
_lowerCamelCase = [0] * len(lowercase_ )
_lowerCamelCase = 1
_lowerCamelCase = v[0]
for i in range(1 , len(lowercase_ ) ):
if v[i] < tail[0]:
_lowerCamelCase = v[i]
elif v[i] > tail[length - 1]:
_lowerCamelCase = v[i]
length += 1
else:
_lowerCamelCase = v[i]
return length
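# Note: tail[k] holds the smallest possible tail value of any increasing
# subsequence of length k + 1, e.g. for [2, 5, 3, 7] the tails evolve as
# [2] -> [2, 5] -> [2, 3] -> [2, 3, 7], giving length 3.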
if __name__ == "__main__":
import doctest
doctest.testmod()
| 623 |
"""simple docstring"""
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    '''Builds a tiny BigBird config and random inputs for the Flax model tests.'''
    def __init__(self, parent, batch_size=2, seq_length=56, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=2, intermediate_size=7, hidden_act="gelu_new", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, attention_type="block_sparse", use_bias=True, rescale_embeddings=False, block_size=2, num_random_blocks=3, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = BigBirdConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, attention_type=self.attention_type, block_size=self.block_size, num_random_blocks=self.num_random_blocks, use_bias=self.use_bias, rescale_embeddings=self.rescale_embeddings, )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''token_type_ids''': token_type_ids,
            '''attention_mask''': attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    '''Runs the common Flax model tests against the BigBird model family.'''
    all_model_classes = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
    test_attn_probs = False
    test_mismatched_shapes = False
    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''google/bigbird-roberta-base''')
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)
                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)
                with self.subTest('''JIT Enabled'''):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest('''JIT Disabled'''):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith('''outputs.attentions'''):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
| 623 | 1 |
"""simple docstring"""
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F"""If set, {key} must be yes or no.""")
    return _value
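# Usage sketch: RUN_SLOW=yes python -m pytest ... ; strtobool() accepts
# y/yes/t/true/on/1 (-> 1) and n/no/f/false/off/0 (-> 0).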
_run_slow_tests = parse_flag_from_env('''RUN_SLOW''', default=False)
def skip(test_case):
    return unittest.skip('''Test was skipped''')(test_case)
def slow(test_case):
    return unittest.skipUnless(_run_slow_tests, '''test is slow''')(test_case)
def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), '''test requires only a CPU''')(test_case)
def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), '''test requires a GPU''')(test_case)
def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), '''test requires a XPU''')(test_case)
def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), '''test requires a `mps` backend support in `torch`''')(test_case)
def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), '''test requires the Hugging Face suite''')(test_case)
def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), '''test requires the bitsandbytes library''')(test_case)
def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), '''test requires TPU''')(test_case)
def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, '''test requires a GPU''')(test_case)
def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, '''test requires a XPU''')(test_case)
def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, '''test requires multiple GPUs''')(test_case)
def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, '''test requires multiple XPUs''')(test_case)
def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), '''test requires safetensors''')(test_case)
def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), '''test requires DeepSpeed''')(test_case)
def require_torch_min_version_1_12(test_case):
    return unittest.skipUnless(is_torch_version('''>=''', '''1.12.0'''), '''test requires torch version >= 1.12.0''')(test_case)
def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version('''>=''', version), F"""test requires torch version >= {version}""")(test_case)
def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), '''test requires Tensorboard''')(test_case)
def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), '''test requires wandb''')(test_case)
def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), '''test requires comet_ml''')(test_case)
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available, '''test requires at least one tracker to be available and for `comet_ml` to not be installed''', )(test_case)
class TempDirTestCase(unittest.TestCase):
    '''Creates one temp dir per test class and empties it between tests.'''
    clear_on_setup = True
    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()
    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)
    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob('''**/*'''):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)
class AccelerateTestCase(unittest.TestCase):
    '''Test case that resets the accelerate state singletons after each test.'''
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class MockingTestCase(unittest.TestCase):
    '''Test case with a helper to register mocks that are stopped automatically.'''
    def add_mocks(self, mocks):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class _RunOutput:
    '''Holds the return code plus captured stdout/stderr of a finished subprocess.'''
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print('''\nRunning: ''', ''' '''.join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env, )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line, sink, pipe, label=""):
        line = line.decode('''utf-8''').rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda x: tee(x, out, sys.stdout, label='''stdout:'''))),
            asyncio.create_task(_read_stream(p.stderr, lambda x: tee(x, err, sys.stderr, label='''stderr:'''))),
        ], timeout=timeout, )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))
    cmd_str = ''' '''.join(cmd)
    if result.returncode > 0:
        stderr = '''\n'''.join(result.stderr)
        raise RuntimeError(
            F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
            F"""The combined stderr from workers follows:\n{stderr}""")
    return result
class SubprocessCallException(Exception):
    pass
def run_command(command, return_stdout=False):
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, '''decode'''):
                output = output.decode('''utf-8''')
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            F"""Command `{" ".join(command)}` failed with the following error:\n\n{e.output.decode()}""") from e
| 623 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''Fast CPU tests for the Stable Diffusion XL img2img pipeline.'''
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
    required_optional_params = PipelineTesterMixin.required_optional_params - {'''latents'''}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''), attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type='''text_time''', addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64, )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule='''scaled_linear''', timestep_spacing='''leading''', )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='''gelu''', projection_dim=32, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''', local_files_only=True)
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''', local_files_only=True)
        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''text_encoder_2''': text_encoder_2,
            '''tokenizer_2''': tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 5.0,
            '''output_type''': '''numpy''',
            '''strength''': 0.75,
        }
        return inputs
    def test_stable_diffusion_xl_img2img_euler(self):
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
    def test_save_load_optional_components(self):
        pass
    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ['''this is a negative prompt''']
        inputs['''negative_prompt'''] = negative_prompt
        inputs['''prompt'''] = 3 * [inputs['''prompt''']]
        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]
        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ['''this is a negative prompt''']
        prompt = 3 * [inputs.pop('''prompt''')]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)
        output = sd_pipe(
            **inputs, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, )
        image_slice_2 = output.images[0, -3:, -3:, -1]
        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
    '''Slow integration tests that run a full pipeline on GPU.'''
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, device, generator_device='''cpu''', dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            '''prompt''': '''a photograph of an astronaut riding a horse''',
            '''latents''': latents,
            '''generator''': generator,
            '''num_inference_steps''': 3,
            '''guidance_scale''': 7.5,
            '''output_type''': '''numpy''',
        }
        return inputs
    def test_stable_diffusion_default(self):
        pipe = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''')
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
| 623 | 1 |
"""simple docstring"""
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
_DESCRIPTION = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
_KWARGS_DESCRIPTION = '''
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
num_workers: number of workers used to evaluate the canidate programs (Default: 4).
timeout:
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
_WARNING = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
_LICENSE = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class lowerCamelCase_( datasets.Metric ):
'''simple docstring'''
def snake_case__ ( self ):
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/openai/human-eval''' , codebase_urls=['''https://github.com/openai/human-eval'''] , reference_urls=['''https://github.com/openai/human-eval'''] , license=_LICENSE , )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=[1, 1_0, 1_0_0] , lowerCamelCase__=4 , lowerCamelCase__=3.0 ):
if os.getenv('''HF_ALLOW_CODE_EVAL''' , 0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError('''This metric is currently not supported on Windows.''' )
with ThreadPoolExecutor(max_workers=lowerCamelCase__ ) as executor:
_lowerCamelCase = []
_lowerCamelCase = Counter()
_lowerCamelCase = 0
_lowerCamelCase = defaultdict(lowerCamelCase__ )
for task_id, (candidates, test_case) in enumerate(zip(lowerCamelCase__ , lowerCamelCase__ ) ):
for candidate in candidates:
_lowerCamelCase = candidate + '''\n''' + test_case
_lowerCamelCase = (test_program, timeout, task_id, completion_id[task_id])
_lowerCamelCase = executor.submit(lowerCamelCase__ , *lowerCamelCase__ )
futures.append(lowerCamelCase__ )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(lowerCamelCase__ ):
_lowerCamelCase = future.result()
results[result["task_id"]].append((result['''completion_id'''], result) )
_lowerCamelCase , _lowerCamelCase = [], []
for result in results.values():
result.sort()
_lowerCamelCase = [r[1]['''passed'''] for r in result]
total.append(len(lowerCamelCase__ ) )
correct.append(sum(lowerCamelCase__ ) )
_lowerCamelCase = np.array(lowerCamelCase__ )
_lowerCamelCase = np.array(lowerCamelCase__ )
_lowerCamelCase = k
_lowerCamelCase = {F"""pass@{k}""": estimate_pass_at_k(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def lowerCAmelCase_( lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : List[Any] ) -> Tuple:
def estimator(lowercase_ : int , lowercase_ : int , lowercase_ : int ) -> float:
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
if isinstance(lowercase_ , lowercase_ ):
_lowerCamelCase = itertools.repeat(lowercase_ , len(lowercase_ ) )
else:
assert len(lowercase_ ) == len(lowercase_ )
_lowerCamelCase = iter(lowercase_ )
return np.array([estimator(int(lowercase_ ) , int(lowercase_ ) , lowercase_ ) for n, c in zip(lowercase_ , lowercase_ )] )
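# A minimal worked example (an addition, not part of the original): the
# estimator above computes pass@k = 1 - C(n - c, k) / C(n, k) as a numerically
# stable running product over (1 - k / i) for i in (n - c + 1, ..., n).
# For n = 2 samples with c = 1 passing and k = 1:
# pass@1 = 1 - C(1, 1) / C(2, 1) = 0.5, matching the docstring example above.
assert abs(1.0 - np.prod(1.0 - 1 / np.arange(2, 3)) - 0.5) < 1e-12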
| 623 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__SCREAMING_SNAKE_CASE : List[Any] = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : int = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
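# A minimal usage note (an addition, not part of the original): with the
# _LazyModule pattern above, `import transformers` stays cheap; the heavy
# torch/TF submodules listed in _import_structure are only imported on first
# attribute access, e.g. when code touches `transformers.XLMModel`.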
| 623 | 1 |
"""simple docstring"""
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=9_9 , lowerCamelCase__=1_3 , lowerCamelCase__=1_6 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=True , lowerCamelCase__=2 , lowerCamelCase__=3_2 , lowerCamelCase__=4 , lowerCamelCase__=4 , lowerCamelCase__=3_0 , lowerCamelCase__=0 , lowerCamelCase__=1 , lowerCamelCase__=2 , lowerCamelCase__=None , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = decoder_seq_length
# For common tests
_lowerCamelCase = self.decoder_seq_length
_lowerCamelCase = is_training
_lowerCamelCase = use_attention_mask
_lowerCamelCase = use_labels
_lowerCamelCase = vocab_size
_lowerCamelCase = d_model
_lowerCamelCase = d_model
_lowerCamelCase = decoder_layers
_lowerCamelCase = decoder_layers
_lowerCamelCase = decoder_ffn_dim
_lowerCamelCase = decoder_attention_heads
_lowerCamelCase = decoder_attention_heads
_lowerCamelCase = eos_token_id
_lowerCamelCase = bos_token_id
_lowerCamelCase = pad_token_id
_lowerCamelCase = decoder_start_token_id
_lowerCamelCase = use_cache
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = None
_lowerCamelCase = decoder_seq_length
_lowerCamelCase = 2
_lowerCamelCase = 1
def snake_case__ ( self ):
_lowerCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
_lowerCamelCase = None
if self.use_attention_mask:
_lowerCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
_lowerCamelCase = None
if self.use_labels:
_lowerCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
_lowerCamelCase = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ):
_lowerCamelCase = True
_lowerCamelCase = TrOCRDecoder(config=lowerCamelCase__ ).to(lowerCamelCase__ ).eval()
_lowerCamelCase = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
_lowerCamelCase = model(lowerCamelCase__ , use_cache=lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , use_cache=lowerCamelCase__ )
self.parent.assertTrue(len(lowerCamelCase__ ) == len(lowerCamelCase__ ) )
self.parent.assertTrue(len(lowerCamelCase__ ) == len(lowerCamelCase__ ) + 1 )
_lowerCamelCase = outputs['''past_key_values''']
# create hypothetical next token and extent to next_input_ids
_lowerCamelCase = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
        # append the sampled next tokens to input_ids
_lowerCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
_lowerCamelCase = model(lowerCamelCase__ )['''last_hidden_state''']
_lowerCamelCase = model(lowerCamelCase__ , past_key_values=lowerCamelCase__ )['''last_hidden_state''']
# select random slice
_lowerCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowerCamelCase = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
_lowerCamelCase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 )
def snake_case__ ( self ):
_lowerCamelCase = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = config_and_inputs
_lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase_( A__, A__, A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : int = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
lowercase__ : List[str] = (TrOCRForCausalLM,) if is_torch_available() else ()
lowercase__ : Tuple = {'text-generation': TrOCRForCausalLM} if is_torch_available() else {}
lowercase__ : Dict = True
lowercase__ : Optional[Any] = False
def snake_case__ ( self ):
_lowerCamelCase = TrOCRStandaloneDecoderModelTester(self , is_training=lowerCamelCase__ )
_lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
self.config_tester.run_common_tests()
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*lowerCamelCase__ )
def snake_case__ ( self ):
return
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def snake_case__ ( self ):
pass
| 623 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
__SCREAMING_SNAKE_CASE : Dict = random.Random()
def lowerCAmelCase_( lowercase_ : Dict , lowercase_ : int=1.0 , lowercase_ : str=None , lowercase_ : Optional[int]=None ) -> Any:
if rng is None:
_lowerCamelCase = global_rng
_lowerCamelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
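# A minimal usage note (an addition, not part of the original): the helper
# above builds a nested Python list of shape (shape[0], shape[1]) with values
# drawn uniformly from [0, scale). A self-contained equivalent for shape (2, 3):
_demo_rng = random.Random(0)
_demo_values = [[_demo_rng.random() for _ in range(3)] for _ in range(2)]
assert len(_demo_values) == 2 and len(_demo_values[0]) == 3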
@require_torch
@require_torchaudio
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=7 , lowerCamelCase__=4_0_0 , lowerCamelCase__=2_0_0_0 , lowerCamelCase__=1_0 , lowerCamelCase__=1_6_0 , lowerCamelCase__=8 , lowerCamelCase__=0.0 , lowerCamelCase__=4_0_0_0 , lowerCamelCase__=False , lowerCamelCase__=True , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = min_seq_length
_lowerCamelCase = max_seq_length
_lowerCamelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_lowerCamelCase = padding_value
_lowerCamelCase = sampling_rate
_lowerCamelCase = return_attention_mask
_lowerCamelCase = do_normalize
_lowerCamelCase = feature_size
_lowerCamelCase = chunk_length
_lowerCamelCase = hop_length
def snake_case__ ( self ):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def snake_case__ ( self , lowerCamelCase__=False , lowerCamelCase__=False ):
def _flatten(lowerCamelCase__ ):
return list(itertools.chain(*lowerCamelCase__ ) )
if equal_length:
_lowerCamelCase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_lowerCamelCase = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_lowerCamelCase = [np.asarray(lowerCamelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowerCamelCase_( A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Optional[int] = WhisperFeatureExtractor if is_speech_available() else None
def snake_case__ ( self ):
_lowerCamelCase = WhisperFeatureExtractionTester(self )
def snake_case__ ( self ):
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase = feat_extract_first.save_pretrained(lowerCamelCase__ )[0]
check_json_file_has_correct_format(lowerCamelCase__ )
_lowerCamelCase = self.feature_extraction_class.from_pretrained(lowerCamelCase__ )
_lowerCamelCase = feat_extract_first.to_dict()
_lowerCamelCase = feat_extract_second.to_dict()
_lowerCamelCase = feat_extract_first.mel_filters
_lowerCamelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase = os.path.join(lowerCamelCase__ , '''feat_extract.json''' )
feat_extract_first.to_json_file(lowerCamelCase__ )
_lowerCamelCase = self.feature_extraction_class.from_json_file(lowerCamelCase__ )
_lowerCamelCase = feat_extract_first.to_dict()
_lowerCamelCase = feat_extract_second.to_dict()
_lowerCamelCase = feat_extract_first.mel_filters
_lowerCamelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
        # Tests that all calls wrap to encode_plus and batch_encode_plus
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_lowerCamelCase = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs]
# Test feature size
_lowerCamelCase = feature_extractor(lowerCamelCase__ , padding='''max_length''' , return_tensors='''np''' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_lowerCamelCase = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features
_lowerCamelCase = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
# Test batched
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_lowerCamelCase = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
_lowerCamelCase = np.asarray(lowerCamelCase__ )
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
# Test truncation required
_lowerCamelCase = [floats_list((1, x) )[0] for x in range(2_0_0 , (feature_extractor.n_samples + 5_0_0) , 2_0_0 )]
_lowerCamelCase = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs]
_lowerCamelCase = [x[: feature_extractor.n_samples] for x in speech_inputs]
_lowerCamelCase = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs_truncated]
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
def snake_case__ ( self ):
import torch
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCamelCase = np.random.rand(1_0_0 , 3_2 ).astype(np.floataa )
_lowerCamelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_lowerCamelCase = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
_lowerCamelCase = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
_lowerCamelCase = ds.sort('''id''' ).select(range(lowerCamelCase__ ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def snake_case__ ( self ):
# fmt: off
_lowerCamelCase = torch.tensor(
[
0.1_1_9_3, -0.0_9_4_6, -0.1_0_9_8, -0.0_1_9_6, 0.0_2_2_5, -0.0_6_9_0, -0.1_7_3_6, 0.0_9_5_1,
0.0_9_7_1, -0.0_8_1_7, -0.0_7_0_2, 0.0_1_6_2, 0.0_2_6_0, 0.0_0_1_7, -0.0_1_9_2, -0.1_6_7_8,
0.0_7_0_9, -0.1_8_6_7, -0.0_6_5_5, -0.0_2_7_4, -0.0_2_3_4, -0.1_8_8_4, -0.0_5_1_6, -0.0_5_5_4,
-0.0_2_7_4, -0.1_4_2_5, -0.1_4_2_3, 0.0_8_3_7, 0.0_3_7_7, -0.0_8_5_4
] )
# fmt: on
_lowerCamelCase = self._load_datasamples(1 )
_lowerCamelCase = WhisperFeatureExtractor()
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''pt''' ).input_features
self.assertEqual(input_features.shape , (1, 8_0, 3_0_0_0) )
self.assertTrue(torch.allclose(input_features[0, 0, :3_0] , lowerCamelCase__ , atol=1e-4 ) )
def snake_case__ ( self ):
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCamelCase = self._load_datasamples(1 )[0]
_lowerCamelCase = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5_5_3_5 # Rescale to [0, 65535] to show issue
_lowerCamelCase = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=lowerCamelCase__ )[0]
self.assertTrue(np.all(np.mean(lowerCamelCase__ ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCamelCase__ ) - 1 ) < 1e-3 ) )
| 623 | 1 |
"""simple docstring"""
def lowerCAmelCase_( lowercase_ : int ) -> int:
if n == 1 or not isinstance(lowercase_ , lowercase_ ):
return 0
elif n == 2:
return 1
else:
_lowerCamelCase = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def lowerCAmelCase_( lowercase_ : int ) -> int:
_lowerCamelCase = 0
_lowerCamelCase = 2
while digits < n:
index += 1
_lowerCamelCase = len(str(fibonacci(lowercase_ ) ) )
return index
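# A minimal sketch (an addition, not part of the original) of an O(n) variant
# that keeps a running Fibonacci pair and counts digits incrementally, instead
# of recomputing the whole sequence on every loop iteration above. The name
# fibonacci_digits_index_linear is hypothetical; for n >= 2 it returns the
# same index as fibonacci_digits_index.
def fibonacci_digits_index_linear(n: int) -> int:
    previous, current = 0, 1
    index = 1
    while len(str(current)) < n:
        previous, current = current, previous + current
        index += 1
    return index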
def lowerCAmelCase_( lowercase_ : int = 10_00 ) -> int:
return fibonacci_digits_index(lowercase_ )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 623 |
"""simple docstring"""
def lowerCAmelCase_( lowercase_ : str , lowercase_ : str ) -> bool:
_lowerCamelCase = len(lowercase_ )
_lowerCamelCase = len(lowercase_ )
_lowerCamelCase = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
_lowerCamelCase = True
for i in range(lowercase_ ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
_lowerCamelCase = True
if a[i].islower():
_lowerCamelCase = True
return dp[n][m]
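# A minimal worked example (an addition, not part of the original) of the rule
# the DP above encodes: string `a` matches abbreviation `b` if some of `a`'s
# lowercase letters can be capitalized to spell `b` in order while every other
# lowercase letter is deleted. For example, a = "daBcd" matches b = "ABC"
# (capitalize 'a' and 'c', delete both 'd's), but a = "dBcd" does not, since
# there is no 'a' to capitalize.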
if __name__ == "__main__":
import doctest
doctest.testmod()
| 623 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__SCREAMING_SNAKE_CASE : List[Any] = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[str] = ['''SqueezeBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : str = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 623 |
"""simple docstring"""
import numpy as np
def lowerCAmelCase_( lowercase_ : np.array ) -> np.array:
return 1 / (1 + np.exp(-vector ))
def lowerCAmelCase_( lowercase_ : np.array ) -> np.array:
return vector * sigmoid(1.7_0_2 * vector )
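# A minimal worked example (an addition, not part of the original): the 1.702
# factor above makes this the sigmoid approximation of GELU; with a factor of
# 1.0 it would be the plain SiLU/Swish activation. At x = 0 the sigmoid is
# exactly 0.5, so the activation value there is 0.
assert 1 / (1 + np.exp(-0.0)) == 0.5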
if __name__ == "__main__":
import doctest
doctest.testmod()
| 623 | 1 |
"""simple docstring"""
from __future__ import annotations
from random import choice
def lowerCAmelCase_( lowercase_ : Tuple ) -> Optional[int]:
return choice(lowercase_ )
def lowerCAmelCase_( lowercase_ : list[int] , lowercase_ : int ) -> int:
_lowerCamelCase = random_pivot(lowercase_ )
# partition based on pivot
# linear time
_lowerCamelCase = [e for e in lst if e < pivot]
_lowerCamelCase = [e for e in lst if e > pivot]
    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than the pivot)
    # + pivot (the kth element, when len(small) == k - 1)
    # + big (elements larger than the pivot)
if len(lowercase_ ) == k - 1:
return pivot
    # the kth smallest element is among the elements bigger than the pivot
elif len(lowercase_ ) < k - 1:
return kth_number(lowercase_ , k - len(lowercase_ ) - 1 )
    # the kth smallest element is among the elements smaller than the pivot
else:
return kth_number(lowercase_ , lowercase_ )
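# A minimal self-contained check (an addition, not part of the original),
# re-implementing the same quickselect idea with explicit names so it can run
# on its own; quickselect_demo is a hypothetical name. Note the approach
# assumes distinct values, since elements equal to the pivot are dropped by
# the two list comprehensions above.
def quickselect_demo(lst: list[int], k: int) -> int:
    pivot = lst[0]  # deterministic pivot for the demo
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    if len(small) == k - 1:
        return pivot
    if len(small) < k - 1:
        return quickselect_demo(big, k - len(small) - 1)
    return quickselect_demo(small, k)

assert quickselect_demo([3, 1, 4, 2], 2) == 2  # 2nd smallest of {1, 2, 3, 4}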
if __name__ == "__main__":
import doctest
doctest.testmod()
| 623 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : str = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Dict = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[Any] = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
__SCREAMING_SNAKE_CASE : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 623 | 1 |
"""simple docstring"""
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
class lowerCamelCase_( enum.Enum ):
'''simple docstring'''
lowercase__ : Dict = 0
lowercase__ : Union[str, Any] = 1
@add_end_docstrings(A__ )
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : Optional[Any] = 'generated'
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ):
super().__init__(*lowerCamelCase__ , **lowerCamelCase__ )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def snake_case__ ( self , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , **lowerCamelCase__ , ):
_lowerCamelCase = {}
if truncation is not None:
_lowerCamelCase = truncation
_lowerCamelCase = generate_kwargs
_lowerCamelCase = {}
if return_tensors is not None and return_type is None:
_lowerCamelCase = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
_lowerCamelCase = return_type
if clean_up_tokenization_spaces is not None:
_lowerCamelCase = clean_up_tokenization_spaces
if stop_sequence is not None:
_lowerCamelCase = self.tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
if len(lowerCamelCase__ ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
_lowerCamelCase = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
return True
def snake_case__ ( self , *lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = self.model.config.prefix if self.model.config.prefix is not None else ''''''
if isinstance(args[0] , lowerCamelCase__ ):
if self.tokenizer.pad_token_id is None:
raise ValueError('''Please make sure that the tokenizer has a pad_token_id when using a batch input''' )
_lowerCamelCase = ([prefix + arg for arg in args[0]],)
_lowerCamelCase = True
elif isinstance(args[0] , lowerCamelCase__ ):
_lowerCamelCase = (prefix + args[0],)
_lowerCamelCase = False
else:
raise ValueError(
F""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
_lowerCamelCase = self.tokenizer(*lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , return_tensors=self.framework )
        # This is produced by tokenizers but is an invalid generate kwarg
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self , *lowerCamelCase__ , **lowerCamelCase__ ):
_lowerCamelCase = super().__call__(*lowerCamelCase__ , **lowerCamelCase__ )
if (
isinstance(args[0] , lowerCamelCase__ )
and all(isinstance(lowerCamelCase__ , lowerCamelCase__ ) for el in args[0] )
and all(len(lowerCamelCase__ ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__=TruncationStrategy.DO_NOT_TRUNCATE , **lowerCamelCase__ ):
_lowerCamelCase = self._parse_and_tokenize(lowerCamelCase__ , truncation=lowerCamelCase__ , **lowerCamelCase__ )
return inputs
def snake_case__ ( self , lowerCamelCase__ , **lowerCamelCase__ ):
if self.framework == "pt":
_lowerCamelCase , _lowerCamelCase = model_inputs['''input_ids'''].shape
elif self.framework == "tf":
_lowerCamelCase , _lowerCamelCase = tf.shape(model_inputs['''input_ids'''] ).numpy()
_lowerCamelCase = generate_kwargs.get('''min_length''' , self.model.config.min_length )
_lowerCamelCase = generate_kwargs.get('''max_length''' , self.model.config.max_length )
self.check_inputs(lowerCamelCase__ , generate_kwargs['''min_length'''] , generate_kwargs['''max_length'''] )
_lowerCamelCase = self.model.generate(**lowerCamelCase__ , **lowerCamelCase__ )
_lowerCamelCase = output_ids.shape[0]
if self.framework == "pt":
_lowerCamelCase = output_ids.reshape(lowerCamelCase__ , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
_lowerCamelCase = tf.reshape(lowerCamelCase__ , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__=ReturnType.TEXT , lowerCamelCase__=False ):
_lowerCamelCase = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
_lowerCamelCase = {F"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
_lowerCamelCase = {
F"""{self.return_name}_text""": self.tokenizer.decode(
lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ , clean_up_tokenization_spaces=lowerCamelCase__ , )
}
records.append(lowerCamelCase__ )
return records
@add_end_docstrings(A__ )
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : List[str] = 'summary'
def __call__( self , *lowerCamelCase__ , **lowerCamelCase__ ):
return super().__call__(*lowerCamelCase__ , **lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
if max_length < min_length:
logger.warning(F"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
F"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
'''a summarization task, where outputs shorter than the input are typically wanted, you might '''
F"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(A__ )
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : List[Any] = 'translation'
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
if input_length > 0.9 * max_length:
logger.warning(
F"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
'''increasing your max_length manually, e.g. translator(\'...\', max_length=400)''' )
return True
def snake_case__ ( self , *lowerCamelCase__ , lowerCamelCase__=TruncationStrategy.DO_NOT_TRUNCATE , lowerCamelCase__=None , lowerCamelCase__=None ):
if getattr(self.tokenizer , '''_build_translation_inputs''' , lowerCamelCase__ ):
return self.tokenizer._build_translation_inputs(
*lowerCamelCase__ , return_tensors=self.framework , truncation=lowerCamelCase__ , src_lang=lowerCamelCase__ , tgt_lang=lowerCamelCase__ )
else:
return super()._parse_and_tokenize(*lowerCamelCase__ , truncation=lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__=None , lowerCamelCase__=None , **lowerCamelCase__ ):
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = super()._sanitize_parameters(**lowerCamelCase__ )
if src_lang is not None:
_lowerCamelCase = src_lang
if tgt_lang is not None:
_lowerCamelCase = tgt_lang
if src_lang is None and tgt_lang is None:
            # Backward compatibility; direct argument use is preferred.
_lowerCamelCase = kwargs.get('''task''' , self.task )
_lowerCamelCase = task.split('''_''' )
if task and len(lowerCamelCase__ ) == 4:
# translation, XX, to YY
_lowerCamelCase = items[1]
_lowerCamelCase = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self , *lowerCamelCase__ , **lowerCamelCase__ ):
return super().__call__(*lowerCamelCase__ , **lowerCamelCase__ )
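# A minimal usage sketch (an addition, not part of the original; the model
# name is illustrative) of the translation pipeline defined above:
#
#     from transformers import pipeline
#     translator = pipeline("translation_en_to_fr", model="t5-small")
#     translator("How old are you?", max_length=40)
#     # -> [{"translation_text": "..."}]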
| 623 |
"""simple docstring"""
from __future__ import annotations
from math import pow, sqrt
def lowerCAmelCase_( lowercase_ : float , lowercase_ : float , lowercase_ : float ) -> dict[str, float]:
if (resistance, reactance, impedance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if resistance == 0:
return {"resistance": sqrt(pow(lowercase_ , 2 ) - pow(lowercase_ , 2 ) )}
elif reactance == 0:
return {"reactance": sqrt(pow(lowercase_ , 2 ) - pow(lowercase_ , 2 ) )}
elif impedance == 0:
return {"impedance": sqrt(pow(lowercase_ , 2 ) + pow(lowercase_ , 2 ) )}
else:
raise ValueError('''Exactly one argument must be 0''' )
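# A minimal worked example (an addition, not part of the original): with
# resistance = 3 ohm and reactance = 4 ohm, the impedance magnitude is
# |Z| = sqrt(3**2 + 4**2) = 5 ohm (the classic 3-4-5 right triangle).
assert sqrt(pow(3, 2) + pow(4, 2)) == 5.0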
if __name__ == "__main__":
import doctest
doctest.testmod()
| 623 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase_( A__, A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Tuple = StableDiffusionXLImgaImgPipeline
lowercase__ : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
lowercase__ : int = PipelineTesterMixin.required_optional_params - {'latents'}
lowercase__ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowercase__ : Dict = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowercase__ : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def snake_case__ ( self ):
torch.manual_seed(0 )
_lowerCamelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=lowerCamelCase__ , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=8_0 , cross_attention_dim=6_4 , )
_lowerCamelCase = EulerDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , )
torch.manual_seed(0 )
_lowerCamelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
_lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=3_2 , )
_lowerCamelCase = CLIPTextModel(lowerCamelCase__ )
_lowerCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=lowerCamelCase__ )
_lowerCamelCase = CLIPTextModelWithProjection(lowerCamelCase__ )
_lowerCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=lowerCamelCase__ )
_lowerCamelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''text_encoder_2''': text_encoder_a,
'''tokenizer_2''': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__=0 ):
_lowerCamelCase = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
_lowerCamelCase = image / 2 + 0.5
if str(lowerCamelCase__ ).startswith('''mps''' ):
_lowerCamelCase = torch.manual_seed(lowerCamelCase__ )
else:
_lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
_lowerCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 5.0,
'''output_type''': '''numpy''',
'''strength''': 0.7_5,
}
return inputs
def snake_case__ ( self ):
_lowerCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_lowerCamelCase = self.get_dummy_components()
_lowerCamelCase = StableDiffusionXLImgaImgPipeline(**lowerCamelCase__ )
_lowerCamelCase = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_lowerCamelCase = self.get_dummy_inputs(lowerCamelCase__ )
_lowerCamelCase = sd_pipe(**lowerCamelCase__ ).images
_lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
_lowerCamelCase = np.array([0.4_6_5_6, 0.4_8_4_0, 0.4_4_3_9, 0.6_6_9_8, 0.5_5_7_4, 0.4_5_2_4, 0.5_7_9_9, 0.5_9_4_3, 0.5_1_6_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case__ ( self ):
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def snake_case__ ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
_lowerCamelCase = self.get_dummy_components()
_lowerCamelCase = StableDiffusionXLImgaImgPipeline(**lowerCamelCase__ )
_lowerCamelCase = sd_pipe.to(lowerCamelCase__ )
_lowerCamelCase = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
# forward without prompt embeds
_lowerCamelCase = self.get_dummy_inputs(lowerCamelCase__ )
_lowerCamelCase = 3 * ['''this is a negative prompt''']
_lowerCamelCase = negative_prompt
_lowerCamelCase = 3 * [inputs['''prompt''']]
_lowerCamelCase = sd_pipe(**lowerCamelCase__ )
_lowerCamelCase = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
_lowerCamelCase = self.get_dummy_inputs(lowerCamelCase__ )
_lowerCamelCase = 3 * ['''this is a negative prompt''']
_lowerCamelCase = 3 * [inputs.pop('''prompt''' )]
        _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = sd_pipe.encode_prompt(lowerCamelCase__ , negative_prompt=lowerCamelCase__ )
_lowerCamelCase = sd_pipe(
**lowerCamelCase__ , prompt_embeds=lowerCamelCase__ , negative_prompt_embeds=lowerCamelCase__ , pooled_prompt_embeds=lowerCamelCase__ , negative_pooled_prompt_embeds=lowerCamelCase__ , )
_lowerCamelCase = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__="cpu" , lowerCamelCase__=torch.floataa , lowerCamelCase__=0 ):
_lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
_lowerCamelCase = np.random.RandomState(lowerCamelCase__ ).standard_normal((1, 4, 6_4, 6_4) )
_lowerCamelCase = torch.from_numpy(lowerCamelCase__ ).to(device=lowerCamelCase__ , dtype=lowerCamelCase__ )
_lowerCamelCase = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def snake_case__ ( self ):
_lowerCamelCase = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''' )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_lowerCamelCase = self.get_inputs(lowerCamelCase__ )
_lowerCamelCase = pipe(**lowerCamelCase__ ).images
_lowerCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_lowerCamelCase = np.array([0.4_9_4_9_3, 0.4_7_8_9_6, 0.4_0_7_9_8, 0.5_4_2_1_4, 0.5_3_2_1_2, 0.4_8_2_0_2, 0.4_7_6_5_6, 0.4_6_3_2_9, 0.4_8_5_0_6] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
| 623 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
def lowerCAmelCase_( lowercase_ : list[Any] ) -> None:
create_state_space_tree(lowercase_ , [] , 0 )
def lowerCAmelCase_( lowercase_ : list[Any] , lowercase_ : list[Any] , lowercase_ : int ) -> None:
if index == len(lowercase_ ):
print(lowercase_ )
return
    # branch 1: exclude the current element from the subsequence
    create_state_space_tree(lowercase_ , lowercase_ , index + 1 )
    # branch 2: include the current element, recurse, then backtrack
    current_subsequence.append(sequence[index] )
    create_state_space_tree(lowercase_ , lowercase_ , index + 1 )
    current_subsequence.pop()
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['''A''', '''B''', '''C'''])
generate_all_subsequences(seq)
| 623 | 1 |
"""simple docstring"""
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
__SCREAMING_SNAKE_CASE : Any = trt.Logger(trt.Logger.WARNING)
__SCREAMING_SNAKE_CASE : Dict = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
__SCREAMING_SNAKE_CASE : Optional[Any] = logging.getLogger(__name__)
__SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--onnx_model_path''',
default=None,
type=str,
required=True,
    help='''Path to the ONNX model.''',
)
parser.add_argument(
'''--output_dir''',
default=None,
type=str,
required=True,
help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
'''--tokenizer_name''',
default='''''',
type=str,
required=True,
help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
'''--version_2_with_negative''',
action='''store_true''',
help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
'''--null_score_diff_threshold''',
type=float,
default=0.0,
help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
'''--max_seq_length''',
default=3_8_4,
type=int,
help=(
'''The maximum total input sequence length after WordPiece tokenization. Sequences '''
'''longer than this will be truncated, and sequences shorter than this will be padded.'''
),
)
parser.add_argument(
'''--doc_stride''',
default=1_2_8,
type=int,
help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
'''--n_best_size''',
default=2_0,
type=int,
help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
'''--max_answer_length''',
default=3_0,
type=int,
help=(
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
),
)
parser.add_argument('''--seed''', type=int, default=4_2, help='''random seed for initialization''')
parser.add_argument(
'''--dataset_name''',
type=str,
default=None,
required=True,
help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--dataset_config_name''',
type=str,
default=None,
help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
    '''--preprocessing_num_workers''', type=int, default=4, help='''Number of worker processes to use for preprocessing.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--fp16''',
action='''store_true''',
help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
'''--int8''',
action='''store_true''',
help='''Whether to use INT8''',
)
__SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
if args.tokenizer_name:
__SCREAMING_SNAKE_CASE : str = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.'''
)
logger.info('''Training/evaluation parameters %s''', args)
__SCREAMING_SNAKE_CASE : Any = args.per_device_eval_batch_size
__SCREAMING_SNAKE_CASE : Optional[Any] = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
__SCREAMING_SNAKE_CASE : int = True
__SCREAMING_SNAKE_CASE : List[str] = '''temp_engine/bert-fp32.engine'''
if args.fpaa:
__SCREAMING_SNAKE_CASE : List[Any] = '''temp_engine/bert-fp16.engine'''
if args.inta:
__SCREAMING_SNAKE_CASE : int = '''temp_engine/bert-int8.engine'''
# import ONNX file
if not os.path.exists('''temp_engine'''):
os.makedirs('''temp_engine''')
__SCREAMING_SNAKE_CASE : str = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, '''rb''') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
__SCREAMING_SNAKE_CASE : Dict = [network.get_input(i) for i in range(network.num_inputs)]
__SCREAMING_SNAKE_CASE : Optional[int] = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
__SCREAMING_SNAKE_CASE : str = 1 << 5_0
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
__SCREAMING_SNAKE_CASE : int = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
__SCREAMING_SNAKE_CASE : Any = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, '''wb''') as f:
f.write(engine.serialize())
def lowerCAmelCase_( lowercase_ : int , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : Tuple , lowercase_ : int , lowercase_ : int , lowercase_ : Any , lowercase_ : Union[str, Any] ) -> Tuple:
_lowerCamelCase = np.asarray(inputs['''input_ids'''] , dtype=np.intaa )
_lowerCamelCase = np.asarray(inputs['''attention_mask'''] , dtype=np.intaa )
_lowerCamelCase = np.asarray(inputs['''token_type_ids'''] , dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , lowercase_ )
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , lowercase_ )
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , lowercase_ )
# start time
_lowerCamelCase = time.time()
# Run inference
context.execute_async(
bindings=[int(lowercase_ ) for d_inp in d_inputs] + [int(lowercase_ ), int(lowercase_ )] , stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(lowercase_ , lowercase_ , lowercase_ )
cuda.memcpy_dtoh_async(lowercase_ , lowercase_ , lowercase_ )
# Synchronize the stream and take time
stream.synchronize()
# end time
_lowerCamelCase = time.time()
_lowerCamelCase = end_time - start_time
_lowerCamelCase = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
__SCREAMING_SNAKE_CASE : Union[str, Any] = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__SCREAMING_SNAKE_CASE : Optional[int] = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('''Evaluation requires a dataset name''')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
__SCREAMING_SNAKE_CASE : Tuple = raw_datasets['''validation'''].column_names
__SCREAMING_SNAKE_CASE : int = '''question''' if '''question''' in column_names else column_names[0]
__SCREAMING_SNAKE_CASE : Optional[Any] = '''context''' if '''context''' in column_names else column_names[1]
__SCREAMING_SNAKE_CASE : Optional[Any] = '''answers''' if '''answers''' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
__SCREAMING_SNAKE_CASE : Dict = tokenizer.padding_side == '''right'''
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."""
)
__SCREAMING_SNAKE_CASE : Dict = min(args.max_seq_length, tokenizer.model_max_length)
def lowerCAmelCase_( lowercase_ : Dict ) -> Optional[int]:
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take up a lot of space). So we remove that
    # left whitespace.
_lowerCamelCase = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit with the context of the previous feature.
_lowerCamelCase = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation='''only_second''' if pad_on_right else '''only_first''' , max_length=lowercase_ , stride=args.doc_stride , return_overflowing_tokens=lowercase_ , return_offsets_mapping=lowercase_ , padding='''max_length''' , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
_lowerCamelCase = tokenized_examples.pop('''overflow_to_sample_mapping''' )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
_lowerCamelCase = []
for i in range(len(tokenized_examples['''input_ids'''] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
_lowerCamelCase = tokenized_examples.sequence_ids(lowercase_ )
_lowerCamelCase = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
_lowerCamelCase = sample_mapping[i]
tokenized_examples["example_id"].append(examples['''id'''][sample_index] )
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
_lowerCamelCase = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples['''offset_mapping'''][i] )
]
return tokenized_examples
__SCREAMING_SNAKE_CASE : Optional[int] = raw_datasets['''validation''']
# Validation Feature Creation
__SCREAMING_SNAKE_CASE : Optional[int] = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='''Running tokenizer on validation dataset''',
)
__SCREAMING_SNAKE_CASE : Optional[int] = default_data_collator
__SCREAMING_SNAKE_CASE : Dict = eval_dataset.remove_columns(['''example_id''', '''offset_mapping'''])
__SCREAMING_SNAKE_CASE : List[Any] = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def lowerCAmelCase_( lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : List[Any]="eval" ) -> Any:
# Post-processing: we match the start logits and end logits to answers in the original context.
_lowerCamelCase = postprocess_qa_predictions(
examples=lowercase_ , features=lowercase_ , predictions=lowercase_ , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=lowercase_ , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
_lowerCamelCase = [
{'''id''': k, '''prediction_text''': v, '''no_answer_probability''': 0.0} for k, v in predictions.items()
]
else:
_lowerCamelCase = [{'''id''': k, '''prediction_text''': v} for k, v in predictions.items()]
_lowerCamelCase = [{'''id''': ex['''id'''], '''answers''': ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=lowercase_ , label_ids=lowercase_ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = load_metric('''squad_v2''' if args.version_2_with_negative else '''squad''')
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def lowerCAmelCase_( lowercase_ : Dict ) -> Dict:
return trt.volume(engine.get_binding_shape(lowercase_ ) ) * engine.get_binding_dtype(lowercase_ ).itemsize
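# Worked example under an assumed binding shape: for a hypothetical INPUT_SHAPE of (8, 384) with int32
# inputs, trt.volume(...) == 3072 elements, so the allocation is 3072 * 4 == 12288 bytes for that binding.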
# Allocate device memory for inputs and outputs.
__SCREAMING_SNAKE_CASE : Tuple = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
__SCREAMING_SNAKE_CASE : Optional[Any] = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
__SCREAMING_SNAKE_CASE : Dict = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
__SCREAMING_SNAKE_CASE : Optional[int] = cuda.mem_alloc(h_outputa.nbytes)
__SCREAMING_SNAKE_CASE : List[str] = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
__SCREAMING_SNAKE_CASE : List[str] = cuda.Stream()
# Evaluation
logger.info('''***** Running Evaluation *****''')
logger.info(F""" Num examples = {len(eval_dataset)}""")
logger.info(F""" Batch size = {args.per_device_eval_batch_size}""")
__SCREAMING_SNAKE_CASE : List[str] = 0.0
__SCREAMING_SNAKE_CASE : Union[str, Any] = 0
__SCREAMING_SNAKE_CASE : Any = timeit.default_timer()
__SCREAMING_SNAKE_CASE : int = None
for step, batch in enumerate(eval_dataloader):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : int = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : int = outputs
__SCREAMING_SNAKE_CASE : List[str] = torch.tensor(start_logits)
__SCREAMING_SNAKE_CASE : Any = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
__SCREAMING_SNAKE_CASE : List[Any] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-1_0_0)
__SCREAMING_SNAKE_CASE : int = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-1_0_0)
__SCREAMING_SNAKE_CASE : Union[str, Any] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
__SCREAMING_SNAKE_CASE : Union[str, Any] = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-1_0_0)
if all_preds is not None:
__SCREAMING_SNAKE_CASE : Any = nested_truncate(all_preds, len(eval_dataset))
__SCREAMING_SNAKE_CASE : Optional[Any] = timeit.default_timer() - start_time
logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 1_0_0_0 / niter))
logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 1_0_0_0))
logger.info('''Total Number of Inference = %d''', niter)
__SCREAMING_SNAKE_CASE : Optional[Any] = post_processing_function(eval_examples, eval_dataset, all_preds)
__SCREAMING_SNAKE_CASE : Dict = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(F"""Evaluation metrics: {eval_metric}""")
| 623 |
"""simple docstring"""
import warnings
from .generation import TFGenerationMixin
class lowerCamelCase_( A__ ):
'''simple docstring'''
warnings.warn(
'Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will '
'be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.', A__, )
| 623 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
__SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
class lowerCamelCase_( A__ ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ):
_lowerCamelCase = feature_size
_lowerCamelCase = sampling_rate
_lowerCamelCase = padding_value
_lowerCamelCase = kwargs.pop('''padding_side''' , '''right''' )
_lowerCamelCase = kwargs.pop('''return_attention_mask''' , lowerCamelCase__ )
super().__init__(**lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = True , lowerCamelCase__ = None , lowerCamelCase__ = False , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , ):
# If we have a list of dicts, let's convert it into a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(lowerCamelCase__ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
_lowerCamelCase = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
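# Conversion sketch: [{'input_values': a}, {'input_values': b}] becomes {'input_values': [a, b]},
# so the rest of the method can treat the batch as a single mapping of feature name to list of examples.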
# The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
'''You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'''
F""" to this method that includes {self.model_input_names[0]}, but you provided"""
F""" {list(processed_features.keys() )}""" )
_lowerCamelCase = processed_features[self.model_input_names[0]]
_lowerCamelCase = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(lowerCamelCase__ ) == 0:
if return_attention_mask:
_lowerCamelCase = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
_lowerCamelCase = required_input[0]
if isinstance(lowerCamelCase__ , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases, so we grab the first non-empty element.
_lowerCamelCase = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(lowerCamelCase__ ):
_lowerCamelCase = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(lowerCamelCase__ ):
_lowerCamelCase = '''tf'''
elif is_torch_tensor(lowerCamelCase__ ):
_lowerCamelCase = '''pt'''
elif isinstance(lowerCamelCase__ , (int, float, list, tuple, np.ndarray) ):
_lowerCamelCase = '''np'''
else:
raise ValueError(
F"""type of {first_element} unknown: {type(lowerCamelCase__ )}. """
'''Should be one of a python, numpy, pytorch or tensorflow object.''' )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
_lowerCamelCase = to_numpy(lowerCamelCase__ )
else:
_lowerCamelCase = [to_numpy(lowerCamelCase__ ) for v in value]
# Convert padding_strategy in PaddingStrategy
_lowerCamelCase = self._get_padding_strategies(padding=lowerCamelCase__ , max_length=lowerCamelCase__ )
_lowerCamelCase = processed_features[self.model_input_names[0]]
_lowerCamelCase = len(lowerCamelCase__ )
if not all(len(lowerCamelCase__ ) == batch_size for v in processed_features.values() ):
raise ValueError('''Some items in the output dictionary have a different batch size than others.''' )
_lowerCamelCase = []
for i in range(lowerCamelCase__ ):
_lowerCamelCase = {k: v[i] for k, v in processed_features.items()}
# truncation
_lowerCamelCase = self._truncate(
lowerCamelCase__ , max_length=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , truncation=lowerCamelCase__ , )
truncated_inputs.append(lowerCamelCase__ )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
_lowerCamelCase = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
_lowerCamelCase = PaddingStrategy.MAX_LENGTH
_lowerCamelCase = {}
for i in range(lowerCamelCase__ ):
# padding
_lowerCamelCase = self._pad(
truncated_inputs[i] , max_length=lowerCamelCase__ , padding_strategy=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , )
for key, value in outputs.items():
if key not in batch_outputs:
_lowerCamelCase = []
if value.dtype is np.dtype(np.floataa ):
_lowerCamelCase = value.astype(np.floataa )
batch_outputs[key].append(lowerCamelCase__ )
return BatchFeature(lowerCamelCase__ , tensor_type=lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = PaddingStrategy.DO_NOT_PAD , lowerCamelCase__ = None , lowerCamelCase__ = None , ):
_lowerCamelCase = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
_lowerCamelCase = len(lowerCamelCase__ )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
_lowerCamelCase = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
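# Rounding sketch: with max_length == 100 and pad_to_multiple_of == 8 this yields ((100 // 8) + 1) * 8 == 104;
# the guard above skips the adjustment when max_length is already an exact multiple.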
_lowerCamelCase = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowerCamelCase__ ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
_lowerCamelCase = np.ones(len(lowerCamelCase__ ) , dtype=np.intaa )
if needs_to_be_padded:
_lowerCamelCase = max_length - len(lowerCamelCase__ )
if self.padding_side == "right":
if return_attention_mask:
_lowerCamelCase = np.pad(
processed_features['''attention_mask'''] , (0, difference) )
_lowerCamelCase = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
_lowerCamelCase = np.pad(
lowerCamelCase__ , lowerCamelCase__ , '''constant''' , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
_lowerCamelCase = np.pad(
processed_features['''attention_mask'''] , (difference, 0) )
_lowerCamelCase = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
_lowerCamelCase = np.pad(
lowerCamelCase__ , lowerCamelCase__ , '''constant''' , constant_values=self.padding_value )
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return processed_features
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , ):
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError('''When setting ``truncation=True``, make sure that ``max_length`` is defined.''' )
_lowerCamelCase = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
_lowerCamelCase = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
_lowerCamelCase = len(lowerCamelCase__ ) > max_length
if needs_to_be_truncated:
_lowerCamelCase = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
_lowerCamelCase = processed_features['''attention_mask'''][:max_length]
return processed_features
def snake_case__ ( self , lowerCamelCase__=False , lowerCamelCase__=None ):
# Get padding strategy
if padding is not False:
if padding is True:
_lowerCamelCase = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = PaddingStrategy(lowerCamelCase__ )
elif isinstance(lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = padding
else:
_lowerCamelCase = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
'''Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'''
''' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.''' )
return padding_strategy
| 623 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowerCAmelCase_( lowercase_ : int , lowercase_ : List[Any] , lowercase_ : List[Any] , lowercase_ : Tuple , lowercase_ : List[str] ) -> Dict:
# Load configuration defined in the metadata file
with open(lowercase_ ) as metadata_file:
_lowerCamelCase = json.load(lowercase_ )
_lowerCamelCase = LukeConfig(use_entity_aware_attention=lowercase_ , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
_lowerCamelCase = torch.load(lowercase_ , map_location='''cpu''' )
# Load the entity vocab file
_lowerCamelCase = load_entity_vocab(lowercase_ )
_lowerCamelCase = RobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
_lowerCamelCase = AddedToken('''<ent>''' , lstrip=lowercase_ , rstrip=lowercase_ )
_lowerCamelCase = AddedToken('''<ent2>''' , lstrip=lowercase_ , rstrip=lowercase_ )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(lowercase_ )
with open(os.path.join(lowercase_ , LukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(lowercase_ , lowercase_ )
_lowerCamelCase = LukeTokenizer.from_pretrained(lowercase_ )
# Initialize the embeddings of the special tokens
_lowerCamelCase = state_dict['''embeddings.word_embeddings.weight''']
_lowerCamelCase = word_emb[tokenizer.convert_tokens_to_ids(['''@'''] )[0]].unsqueeze(0 )
_lowerCamelCase = word_emb[tokenizer.convert_tokens_to_ids(['''#'''] )[0]].unsqueeze(0 )
_lowerCamelCase = torch.cat([word_emb, ent_emb, enta_emb] )
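# The two extra rows appended here back the `<ent>`/`<ent2>` tokens registered above (hence
# config.vocab_size += 2); they are initialized from the embeddings of '@' and '#' respectively.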
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_lowerCamelCase = F"""encoder.layer.{layer_index}.attention.self."""
_lowerCamelCase = state_dict[prefix + matrix_name]
_lowerCamelCase = state_dict[prefix + matrix_name]
_lowerCamelCase = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_lowerCamelCase = state_dict['''entity_embeddings.entity_embeddings.weight''']
_lowerCamelCase = entity_emb[entity_vocab['''[MASK]''']]
_lowerCamelCase = LukeModel(config=lowercase_ ).eval()
_lowerCamelCase , _lowerCamelCase = model.load_state_dict(lowercase_ , strict=lowercase_ )
if not (len(lowercase_ ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(F"""Missing keys {", ".join(lowercase_ )}. Expected only missing embeddings.position_ids""" )
if not (all(key.startswith('''entity_predictions''' ) or key.startswith('''lm_head''' ) for key in unexpected_keys )):
raise ValueError(
'''Unexpected keys'''
F""" {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}""" )
# Check outputs
_lowerCamelCase = LukeTokenizer.from_pretrained(lowercase_ , task='''entity_classification''' )
_lowerCamelCase = (
'''Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'''
''' new world number one avoid a humiliating second- round exit at Wimbledon .'''
)
_lowerCamelCase = (39, 42)
_lowerCamelCase = tokenizer(lowercase_ , entity_spans=[span] , add_prefix_space=lowercase_ , return_tensors='''pt''' )
_lowerCamelCase = model(**lowercase_ )
# Verify word hidden states
if model_size == "large":
_lowerCamelCase = torch.Size((1, 42, 10_24) )
_lowerCamelCase = torch.tensor(
[[0.0_1_3_3, 0.0_8_6_5, 0.0_0_9_5], [0.3_0_9_3, -0.2_5_7_6, -0.7_4_1_8], [-0.1_7_2_0, -0.2_1_1_7, -0.2_8_6_9]] )
else: # base
_lowerCamelCase = torch.Size((1, 42, 7_68) )
_lowerCamelCase = torch.tensor([[0.0_0_3_7, 0.1_3_6_8, -0.0_0_9_1], [0.1_0_9_9, 0.3_3_2_9, -0.1_0_9_5], [0.0_7_6_5, 0.5_3_3_5, 0.1_1_7_9]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase_ , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
_lowerCamelCase = torch.Size((1, 1, 10_24) )
_lowerCamelCase = torch.tensor([[0.0_4_6_6, -0.0_1_0_6, -0.0_1_7_9]] )
else: # base
_lowerCamelCase = torch.Size((1, 1, 7_68) )
_lowerCamelCase = torch.tensor([[0.1_4_5_7, 0.1_0_4_4, 0.0_1_7_4]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowercase_ , atol=1e-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(lowercase_ ) )
model.save_pretrained(lowercase_ )
def lowerCAmelCase_( lowercase_ : Optional[Any] ) -> Any:
_lowerCamelCase = {}
with open(lowercase_ , '''r''' , encoding='''utf-8''' ) as f:
for index, line in enumerate(lowercase_ ):
_lowerCamelCase , _lowerCamelCase = line.rstrip().split('''\t''' )
_lowerCamelCase = index
return entity_vocab
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
__SCREAMING_SNAKE_CASE : int = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 623 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : Any = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[str] = [
'''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''UniSpeechForCTC''',
'''UniSpeechForPreTraining''',
'''UniSpeechForSequenceClassification''',
'''UniSpeechModel''',
'''UniSpeechPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 623 |
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=1_3 , lowerCamelCase__=3_0 , lowerCamelCase__=2 , lowerCamelCase__=3 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=3_2 , lowerCamelCase__=2 , lowerCamelCase__=4 , lowerCamelCase__=3_7 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=1_0 , lowerCamelCase__=0.0_2 , lowerCamelCase__=3 , lowerCamelCase__=0.6 , lowerCamelCase__=None , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = image_size
_lowerCamelCase = patch_size
_lowerCamelCase = num_channels
_lowerCamelCase = is_training
_lowerCamelCase = use_labels
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = type_sequence_label_size
_lowerCamelCase = initializer_range
_lowerCamelCase = mask_ratio
_lowerCamelCase = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded up
# (we add 1 for the [CLS] token)
_lowerCamelCase = (image_size // patch_size) ** 2
_lowerCamelCase = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
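# Worked example with this tester's defaults (image_size=30, patch_size=2, mask_ratio=0.6):
# num_patches == (30 // 2) ** 2 == 225 and seq_length == ceil(0.4 * 226) == 91.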
def snake_case__ ( self ):
_lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase = None
if self.use_labels:
_lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase = self.get_config()
return config, pixel_values, labels
def snake_case__ ( self ):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = TFViTMAEModel(config=lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , training=lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = TFViTMAEForPreTraining(lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , training=lowerCamelCase__ )
# expected sequence length = num_patches
_lowerCamelCase = (self.image_size // self.patch_size) ** 2
_lowerCamelCase = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
_lowerCamelCase = 1
_lowerCamelCase = TFViTMAEForPreTraining(lowerCamelCase__ )
_lowerCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase = model(lowerCamelCase__ , training=lowerCamelCase__ )
_lowerCamelCase = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def snake_case__ ( self ):
_lowerCamelCase = self.prepare_config_and_inputs()
((_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase)) = config_and_inputs
_lowerCamelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class lowerCamelCase_( A__, A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Optional[Any] = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
lowercase__ : Dict = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {}
lowercase__ : Optional[Any] = False
lowercase__ : Union[str, Any] = False
lowercase__ : str = False
lowercase__ : List[str] = False
def snake_case__ ( self ):
_lowerCamelCase = TFViTMAEModelTester(self )
_lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=3_7 )
def snake_case__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
_lowerCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ , tf.keras.layers.Layer ) )
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase = [*signature.parameters.keys()]
_lowerCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCamelCase__ )
def snake_case__ ( self ):
# make the mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
_lowerCamelCase = copy.deepcopy(self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
_lowerCamelCase = model(**lowerCamelCase__ , noise=lowerCamelCase__ )
_lowerCamelCase = outputs_dict[0].numpy()
_lowerCamelCase = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 )
def snake_case__ ( self ):
# make the mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(lowerCamelCase__ ):
_lowerCamelCase = {}
for k, v in inputs_dict.items():
if tf.is_tensor(lowerCamelCase__ ):
_lowerCamelCase = v.numpy()
else:
_lowerCamelCase = np.array(lowerCamelCase__ )
return inputs_np_dict
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = prepare_numpy_arrays(lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
_lowerCamelCase = model(**lowerCamelCase__ , noise=lowerCamelCase__ )
self.assert_outputs_same(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
# make masks reproducible
np.random.seed(2 )
_lowerCamelCase = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_lowerCamelCase = tf.constant(lowerCamelCase__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
_lowerCamelCase = tf_noise
super().check_pt_tf_models(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
# make mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(lowerCamelCase__ )
if module_member_name.endswith('''MainLayer''' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )]
for module_member in (getattr(lowerCamelCase__ , lowerCamelCase__ ),)
if isinstance(lowerCamelCase__ , lowerCamelCase__ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(lowerCamelCase__ , '''_keras_serializable''' , lowerCamelCase__ )
}
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_lowerCamelCase = tf.convert_to_tensor(lowerCamelCase__ )
inputs_dict.update({'''noise''': noise} )
for main_layer_class in tf_main_layer_classes:
_lowerCamelCase = main_layer_class(lowerCamelCase__ )
_lowerCamelCase = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
_lowerCamelCase = tf.keras.Model(lowerCamelCase__ , outputs=main_layer(lowerCamelCase__ ) )
_lowerCamelCase = model(lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase = os.path.join(lowerCamelCase__ , '''keras_model.h5''' )
model.save(lowerCamelCase__ )
_lowerCamelCase = tf.keras.models.load_model(
lowerCamelCase__ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(lowerCamelCase__ , tf.keras.Model )
_lowerCamelCase = model(lowerCamelCase__ )
self.assert_outputs_same(lowerCamelCase__ , lowerCamelCase__ )
@slow
def snake_case__ ( self ):
# make mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
if model_class.__name__ == "TFViTMAEModel":
_lowerCamelCase = outputs.last_hidden_state.numpy()
_lowerCamelCase = 0
else:
_lowerCamelCase = outputs.logits.numpy()
_lowerCamelCase = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase__ , saved_model=lowerCamelCase__ )
_lowerCamelCase = model_class.from_pretrained(lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
if model_class.__name__ == "TFViTMAEModel":
_lowerCamelCase = after_outputs['''last_hidden_state'''].numpy()
_lowerCamelCase = 0
else:
_lowerCamelCase = after_outputs['''logits'''].numpy()
_lowerCamelCase = 0
_lowerCamelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCamelCase__ , 1e-5 )
def snake_case__ ( self ):
# make mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
_lowerCamelCase = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(lowerCamelCase__ )
_lowerCamelCase = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
_lowerCamelCase = model_class.from_config(model.config )
_lowerCamelCase = new_model(lowerCamelCase__ ) # Build model
new_model.set_weights(model.get_weights() )
_lowerCamelCase = new_model(lowerCamelCase__ , noise=lowerCamelCase__ )
self.assert_outputs_same(lowerCamelCase__ , lowerCamelCase__ )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.''' )
def snake_case__ ( self ):
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def snake_case__ ( self ):
pass
@slow
def snake_case__ ( self ):
_lowerCamelCase = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(lowerCamelCase__ )
def lowerCAmelCase_( ) -> List[Any]:
_lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case__ ( self ):
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def snake_case__ ( self ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
_lowerCamelCase = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' )
_lowerCamelCase = self.default_image_processor
_lowerCamelCase = prepare_img()
_lowerCamelCase = image_processor(images=lowerCamelCase__ , return_tensors='''tf''' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
_lowerCamelCase = ViTMAEConfig()
_lowerCamelCase = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(1, num_patches) )
# forward pass
_lowerCamelCase = model(**lowerCamelCase__ , noise=lowerCamelCase__ )
# verify the logits
_lowerCamelCase = tf.convert_to_tensor([1, 1_9_6, 7_6_8] )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
_lowerCamelCase = tf.convert_to_tensor(
[[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
| 623 | 1 |
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def lowerCAmelCase_( lowercase_ : str = "isbn/0140328726" ) -> dict:
_lowerCamelCase = olid.strip().strip('''/''' ) # Remove leading/trailing whitespace & slashes
if new_olid.count('''/''' ) != 1:
_lowerCamelCase = F"""{olid} is not a valid Open Library olid"""
raise ValueError(lowercase_ )
return requests.get(F"""https://openlibrary.org/{new_olid}.json""" ).json()
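# Usage sketch with the default olid: get_openlibrary_data('isbn/0140328726') issues a GET to
# https://openlibrary.org/isbn/0140328726.json and returns the decoded JSON dict.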
def lowerCAmelCase_( lowercase_ : dict ) -> dict:
_lowerCamelCase = {
'''title''': '''Title''',
'''publish_date''': '''Publish date''',
'''authors''': '''Authors''',
'''number_of_pages''': '''Number of pages:''',
'''first_sentence''': '''First sentence''',
'''isbn_10''': '''ISBN (10)''',
'''isbn_13''': '''ISBN (13)''',
}
_lowerCamelCase = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
_lowerCamelCase = [
get_openlibrary_data(author['''key'''] )['''name'''] for author in data['''Authors''']
]
_lowerCamelCase = data['''First sentence''']['''value''']
for key, value in data.items():
if isinstance(lowercase_ , lowercase_ ):
_lowerCamelCase = ''', '''.join(lowercase_ )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
__SCREAMING_SNAKE_CASE : int = input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (1_0, 1_3) or not isbn.isdigit():
print(F"""Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.""")
continue
print(F"""\nSearching Open Library for ISBN: {isbn}...\n""")
try:
__SCREAMING_SNAKE_CASE : Dict = summarize_book(get_openlibrary_data(F"""isbn/{isbn}"""))
print('''\n'''.join(F"""{key}: {value}""" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F"""Sorry, there are no results for ISBN: {isbn}.""")
| 623 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def lowerCAmelCase_( lowercase_ : str = "laptop" ) -> DataFrame:
_lowerCamelCase = F"""https://www.amazon.in/laptop/s?k={product}"""
_lowerCamelCase = {
'''User-Agent''': '''Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36''',
'''Accept-Language''': '''en-US, en;q=0.5''',
}
_lowerCamelCase = BeautifulSoup(requests.get(lowercase_ , headers=lowercase_ ).text )
# Initialize a Pandas dataframe with the column titles
_lowerCamelCase = DataFrame(
columns=[
'''Product Title''',
'''Product Link''',
'''Current Price of the product''',
'''Product Rating''',
'''MRP of the product''',
'''Discount''',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'''div''' , attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''} , ) , soup.find_all('''div''' , attrs={'''class''': '''a-row a-size-base a-color-base'''} ) , ):
try:
_lowerCamelCase = item.ha.text
_lowerCamelCase = '''https://www.amazon.in/''' + item.ha.a['''href''']
_lowerCamelCase = item.find('''span''' , attrs={'''class''': '''a-offscreen'''} ).text
try:
_lowerCamelCase = item.find('''span''' , attrs={'''class''': '''a-icon-alt'''} ).text
except AttributeError:
_lowerCamelCase = '''Not available'''
try:
_lowerCamelCase = (
'''₹'''
+ item.find(
'''span''' , attrs={'''class''': '''a-price a-text-price'''} ).text.split('''₹''' )[1]
)
except AttributeError:
_lowerCamelCase = ''''''
try:
_lowerCamelCase = float(
(
(
float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
- float(product_price.strip('''₹''' ).replace(''',''' , '''''' ) )
)
/ float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
)
* 1_00 )
except ValueError:
_lowerCamelCase = float('''nan''' )
except AttributeError:
pass
_lowerCamelCase = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
_lowerCamelCase = ''' '''
_lowerCamelCase = ''' '''
data_frame.index += 1
return data_frame
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[int] = '''headphones'''
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 623 | 1 |
"""simple docstring"""
def lowerCAmelCase_( lowercase_ : list[int] ) -> float:
if not nums: # Makes sure that the list is not empty
raise ValueError('''List is empty''' )
_lowerCamelCase = sum(lowercase_ ) / len(lowercase_ ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(lowercase_ )
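# Worked example: for nums == [2, 4, 6] the average is 4, so the result is
# (|2 - 4| + |4 - 4| + |6 - 4|) / 3 == 4 / 3 ≈ 1.33.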
if __name__ == "__main__":
import doctest
doctest.testmod()
| 623 |
"""simple docstring"""
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class lowerCamelCase_( A__ ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=1_0_2_4 , lowerCamelCase__=1_0_2_4 , lowerCamelCase__=3.6 ):
_lowerCamelCase = tokenizer
_lowerCamelCase = tokenizer.bos_token_id
_lowerCamelCase = dataset
_lowerCamelCase = seq_length
_lowerCamelCase = seq_length * chars_per_token * num_of_sequences
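# With the signature defaults (seq_length=1024, num_of_sequences=1024, chars_per_token=3.6), this
# buffers 1024 * 3.6 * 1024 ≈ 3.77M characters of raw text before each tokenization pass.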
def __iter__( self ):
_lowerCamelCase = iter(self.dataset )
_lowerCamelCase = True
while more_examples:
_lowerCamelCase , _lowerCamelCase = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(lowerCamelCase__ )['''content'''] )
buffer_len += len(buffer[-1] )
except StopIteration:
_lowerCamelCase = False
break
_lowerCamelCase = tokenizer(lowerCamelCase__ , truncation=lowerCamelCase__ )['''input_ids''']
_lowerCamelCase = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0 , len(lowerCamelCase__ ) , self.seq_length ):
_lowerCamelCase = all_token_ids[i : i + self.seq_length]
if len(lowerCamelCase__ ) == self.seq_length:
yield torch.tensor(lowerCamelCase__ )
def lowerCAmelCase_( lowercase_ : Any ) -> Optional[Any]:
_lowerCamelCase = {'''streaming''': True}
_lowerCamelCase = load_dataset(args.dataset_name , split='''train''' , **lowercase_ )
_lowerCamelCase = ConstantLengthDataset(lowercase_ , lowercase_ , seq_length=args.seq_length )
_lowerCamelCase = DataLoader(lowercase_ , batch_size=args.batch_size )
return eval_dataloader
def lowerCAmelCase_( lowercase_ : Tuple ) -> str:
model.eval()
_lowerCamelCase = []
for step, batch in enumerate(lowercase_ ):
with torch.no_grad():
_lowerCamelCase = model(lowercase_ , labels=lowercase_ )
_lowerCamelCase = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(lowercase_ ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
_lowerCamelCase = torch.mean(torch.cat(lowercase_ ) )
try:
_lowerCamelCase = torch.exp(lowercase_ )
except OverflowError:
_lowerCamelCase = float('''inf''' )
return loss.item(), perplexity.item()
# Setup Accelerator
__SCREAMING_SNAKE_CASE : Dict = Accelerator()
# Parse configuration
__SCREAMING_SNAKE_CASE : Tuple = HfArgumentParser(EvaluationArguments)
__SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
set_seed(args.seed)
# Logging
__SCREAMING_SNAKE_CASE : Optional[Any] = logging.getLogger(__name__)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
# Load model and tokenizer
__SCREAMING_SNAKE_CASE : str = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
__SCREAMING_SNAKE_CASE : Union[str, Any] = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
__SCREAMING_SNAKE_CASE : str = create_dataloader(args)
# Prepare everything with our `accelerator`.
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('''Evaluating and saving model after training''')
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = evaluate(args)
logger.info(F"""loss/eval: {eval_loss}, perplexity: {perplexity}""")
| 623 | 1 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : Union[str, Any] = ['image_processor', 'tokenizer']
lowercase__ : Any = 'LayoutLMv3ImageProcessor'
lowercase__ : Optional[int] = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
def __init__( self , lowerCamelCase__=None , lowerCamelCase__=None , **lowerCamelCase__ ):
_lowerCamelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , lowerCamelCase__ , )
_lowerCamelCase = kwargs.pop('''feature_extractor''' )
_lowerCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowerCamelCase__ , lowerCamelCase__ )
def __call__( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = True , lowerCamelCase__ = False , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = 0 , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = False , lowerCamelCase__ = False , lowerCamelCase__ = False , lowerCamelCase__ = False , lowerCamelCase__ = True , lowerCamelCase__ = None , **lowerCamelCase__ , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
# first, apply the image processor
_lowerCamelCase = self.image_processor(images=lowerCamelCase__ , return_tensors=lowerCamelCase__ )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = [text] # add batch dimension (as the image processor always adds a batch dimension)
_lowerCamelCase = features['''words''']
_lowerCamelCase = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , stride=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , return_special_tokens_mask=lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , return_length=lowerCamelCase__ , verbose=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ , )
# add pixel values
_lowerCamelCase = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
_lowerCamelCase = self.get_overflowing_images(lowerCamelCase__ , encoded_inputs['''overflow_to_sample_mapping'''] )
_lowerCamelCase = images
return encoded_inputs
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
_lowerCamelCase = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(lowerCamelCase__ ) != len(lowerCamelCase__ ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
F""" {len(lowerCamelCase__ )} and {len(lowerCamelCase__ )}""" )
return images_with_overflow
def snake_case__ ( self , *lowerCamelCase__ , **lowerCamelCase__ ):
return self.tokenizer.batch_decode(*lowerCamelCase__ , **lowerCamelCase__ )
def snake_case__ ( self , *lowerCamelCase__ , **lowerCamelCase__ ):
return self.tokenizer.decode(*lowerCamelCase__ , **lowerCamelCase__ )
@property
def snake_case__ ( self ):
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def snake_case__ ( self ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowerCamelCase__ , )
return self.image_processor_class
@property
def snake_case__ ( self ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , lowerCamelCase__ , )
return self.image_processor
| 623 |
"""simple docstring"""
import numpy as np
def lowerCAmelCase_( lowercase_ : np.ndarray , lowercase_ : np.ndarray , lowercase_ : float = 1e-12 , lowercase_ : int = 1_00 , ) -> tuple[float, np.ndarray]:
assert np.shape(lowercase_ )[0] == np.shape(lowercase_ )[1]
# Ensure proper dimensionality.
assert np.shape(lowercase_ )[0] == np.shape(lowercase_ )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(lowercase_ ) == np.iscomplexobj(lowercase_ )
_lowerCamelCase = np.iscomplexobj(lowercase_ )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(lowercase_ , input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
_lowerCamelCase = False
_lowerCamelCase = 0
_lowerCamelCase = 0
_lowerCamelCase = 1e12
while not convergence:
# Multiply the matrix by the vector.
_lowerCamelCase = np.dot(lowercase_ , lowercase_ )
# Normalize the resulting output vector.
_lowerCamelCase = w / np.linalg.norm(lowercase_ )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
_lowerCamelCase = vector.conj().T if is_complex else vector.T
_lowerCamelCase = np.dot(lowercase_ , np.dot(lowercase_ , lowercase_ ) )
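# Equivalently, lambda = v^H A v (or v^T A v in the real case): the Rayleigh quotient with a unit
# denominator, since the vector was normalized in the step above.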
# Check convergence.
_lowerCamelCase = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
_lowerCamelCase = True
_lowerCamelCase = lambda_
if is_complex:
_lowerCamelCase = np.real(lambda_ )
return lambda_, vector
def lowerCAmelCase_( ) -> None:
_lowerCamelCase = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
_lowerCamelCase = np.array([41, 4, 20] )
_lowerCamelCase = real_input_matrix.astype(np.complexaaa )
_lowerCamelCase = np.triu(1j * complex_input_matrix , 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
_lowerCamelCase = np.array([41, 4, 20] ).astype(np.complexaaa )
for problem_type in ["real", "complex"]:
if problem_type == "real":
_lowerCamelCase = real_input_matrix
_lowerCamelCase = real_vector
elif problem_type == "complex":
_lowerCamelCase = complex_input_matrix
_lowerCamelCase = complex_vector
# Our implementation.
_lowerCamelCase , _lowerCamelCase = power_iteration(lowercase_ , lowercase_ )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
# eigh (eigh used for symmetric or hermetian matrices).
_lowerCamelCase , _lowerCamelCase = np.linalg.eigh(lowercase_ )
# Last eigenvalue is the maximum one.
_lowerCamelCase = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
_lowerCamelCase = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1e-6
# Take absolute values element wise of each eigenvector.
# as they are only unique to a minus sign.
assert np.linalg.norm(np.abs(lowercase_ ) - np.abs(lowercase_ ) ) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 623 | 1 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def lowerCAmelCase_( lowercase_ : Optional[Any] ) -> Any:
_lowerCamelCase = SwinConfig(image_size=1_92 )
if "base" in model_name:
_lowerCamelCase = 6
_lowerCamelCase = 1_28
_lowerCamelCase = (2, 2, 18, 2)
_lowerCamelCase = (4, 8, 16, 32)
elif "large" in model_name:
_lowerCamelCase = 12
_lowerCamelCase = 1_92
_lowerCamelCase = (2, 2, 18, 2)
_lowerCamelCase = (6, 12, 24, 48)
else:
raise ValueError('''Model not supported, only supports base and large variants''' )
_lowerCamelCase = window_size
_lowerCamelCase = embed_dim
_lowerCamelCase = depths
_lowerCamelCase = num_heads
return config
def lowerCAmelCase_( lowercase_ : Dict ) -> List[str]:
if "encoder.mask_token" in name:
_lowerCamelCase = name.replace('''encoder.mask_token''' , '''embeddings.mask_token''' )
if "encoder.patch_embed.proj" in name:
_lowerCamelCase = name.replace('''encoder.patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "encoder.patch_embed.norm" in name:
_lowerCamelCase = name.replace('''encoder.patch_embed.norm''' , '''embeddings.norm''' )
if "attn.proj" in name:
_lowerCamelCase = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
_lowerCamelCase = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
_lowerCamelCase = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
_lowerCamelCase = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
_lowerCamelCase = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
_lowerCamelCase = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "encoder.norm.weight":
_lowerCamelCase = '''layernorm.weight'''
if name == "encoder.norm.bias":
_lowerCamelCase = '''layernorm.bias'''
if "decoder" in name:
pass
else:
_lowerCamelCase = '''swin.''' + name
return name
def lowerCAmelCase_( lowercase_ : List[str] , lowercase_ : Optional[Any] ) -> Dict:
for key in orig_state_dict.copy().keys():
_lowerCamelCase = orig_state_dict.pop(lowercase_ )
if "attn_mask" in key:
pass
elif "qkv" in key:
_lowerCamelCase = key.split('''.''' )
_lowerCamelCase = int(key_split[2] )
_lowerCamelCase = int(key_split[4] )
_lowerCamelCase = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
_lowerCamelCase = val[:dim, :]
_lowerCamelCase = val[
dim : dim * 2, :
]
_lowerCamelCase = val[-dim:, :]
else:
_lowerCamelCase = val[
:dim
]
_lowerCamelCase = val[
dim : dim * 2
]
_lowerCamelCase = val[
-dim:
]
else:
_lowerCamelCase = val
return orig_state_dict
def lowerCAmelCase_( lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : Optional[int] ) -> Union[str, Any]:
_lowerCamelCase = torch.load(lowercase_ , map_location='''cpu''' )['''model''']
_lowerCamelCase = get_swin_config(lowercase_ )
_lowerCamelCase = SwinForMaskedImageModeling(lowercase_ )
model.eval()
_lowerCamelCase = convert_state_dict(lowercase_ , lowercase_ )
model.load_state_dict(lowercase_ )
_lowerCamelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_lowerCamelCase = ViTImageProcessor(size={'''height''': 1_92, '''width''': 1_92} )
_lowerCamelCase = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw )
_lowerCamelCase = image_processor(images=lowercase_ , return_tensors='''pt''' )
with torch.no_grad():
_lowerCamelCase = model(**lowercase_ ).logits
print(outputs.keys() )
print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and image processor for {model_name} to hub")
        model.push_to_hub(f"microsoft/{model_name}")
        image_processor.push_to_hub(f"microsoft/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''swin-base-simmim-window6-192''',
type=str,
choices=['''swin-base-simmim-window6-192''', '''swin-large-simmim-window12-192'''],
help='''Name of the Swin SimMIM model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth''',
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
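    # Example invocation (illustrative paths; the default above points at the author's
    # local checkpoint):
    #   python convert_swin_simmim_to_pytorch.py \
    #       --model_name swin-base-simmim-window6-192 \
    #       --checkpoint_path /path/to/simmim_pretrain__swin_base__img192_window6__100ep.pth \
    #       --pytorch_dump_folder_path ./swin-base-simmim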
| 623 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
'''configuration_speecht5''': [
'''SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP''',
'''SpeechT5Config''',
'''SpeechT5HifiGanConfig''',
],
'''feature_extraction_speecht5''': ['''SpeechT5FeatureExtractor'''],
'''processing_speecht5''': ['''SpeechT5Processor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_speecht5'''] = ['''SpeechT5Tokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_speecht5'''] = [
'''SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SpeechT5ForSpeechToText''',
'''SpeechT5ForSpeechToSpeech''',
'''SpeechT5ForTextToSpeech''',
'''SpeechT5Model''',
'''SpeechT5PreTrainedModel''',
'''SpeechT5HifiGan''',
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speecht5 import SpeechT5Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
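# Illustrative note (not part of the original file): with the lazy module in place,
# `from transformers.models.speecht5 import SpeechT5Config` resolves through _LazyModule,
# which only imports configuration_speecht5 on first attribute access.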
| 623 | 1 |
"""simple docstring"""
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factors of n in ascending order via trial division."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
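# Quick sanity checks (added for illustration):
#   prime_factors(12) == [2, 2, 3] and prime_factors(97) == [97].
# Trial division only needs divisors up to sqrt(n); any n > 1 left at the end is itself prime.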
if __name__ == "__main__":
import doctest
doctest.testmod()
| 623 |
"""simple docstring"""
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)
def plot(vectors: list[numpy.ndarray]) -> None:
    axes = plt.gca()
    axes.set_aspect('''equal''')

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
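# Each iteration replaces every edge with four edges of one third the length, so the call
# below (5 steps from the 4-point triangle outline) yields 3 * 4**5 + 1 = 3073 points.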
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 623 | 1 |
"""simple docstring"""
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Gets raw characters from inputs"""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
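# Note on the posix branch above: tty.setraw() turns off line buffering and echo so a
# single keypress is readable immediately, and the finally-block restores the saved
# termios settings even if the read raises.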
def get_character():
    """Gets a character from the keyboard and returns the key code"""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 623 |
"""simple docstring"""
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    def push(self, new_data: Any):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1, node_data_2):
        if node_data_1 == node_data_2:
            return
        else:
            node_1 = self.head
            while node_1 is not None and node_1.data != node_data_1:
                node_1 = node_1.next

            node_2 = self.head
            while node_2 is not None and node_2.data != node_data_2:
                node_2 = node_2.next

            if node_1 is None or node_2 is None:
                return

            node_1.data, node_2.data = node_2.data, node_1.data
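# Note: swap_nodes exchanges the two nodes' data payloads instead of re-linking them,
# so a swap costs two O(n) scans and no pointer surgery.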
if __name__ == "__main__":
    ll = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('''After swapping''')
ll.print_list()
| 623 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/nllb-200-distilled-600M''': 1_0_2_4,
}
# fmt: off
__SCREAMING_SNAKE_CASE : Optional[Any] = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class NllbTokenizer(PreTrainedTokenizer):
'''simple docstring'''
lowercase__ : List[Any] = VOCAB_FILES_NAMES
lowercase__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : Dict = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : List[Any] = ['input_ids', 'attention_mask']
lowercase__ : List[int] = []
lowercase__ : List[int] = []
def __init__( self , lowerCamelCase__ , lowerCamelCase__="<s>" , lowerCamelCase__="</s>" , lowerCamelCase__="</s>" , lowerCamelCase__="<s>" , lowerCamelCase__="<unk>" , lowerCamelCase__="<pad>" , lowerCamelCase__="<mask>" , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__ = None , lowerCamelCase__=None , lowerCamelCase__=False , **lowerCamelCase__ , ):
# Mask token behave like a normal word, i.e. include the space before it
_lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token
_lowerCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCamelCase = legacy_behaviour
super().__init__(
bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , src_lang=lowerCamelCase__ , tgt_lang=lowerCamelCase__ , additional_special_tokens=lowerCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=lowerCamelCase__ , **lowerCamelCase__ , )
_lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCamelCase__ ) )
_lowerCamelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
_lowerCamelCase = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_lowerCamelCase = 1
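        # Worked example of the offset above (illustrative): spm assigns "," id 3; adding the
        # fairseq offset of 1 yields the fairseq-aligned id 4 shown in the table.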
_lowerCamelCase = len(self.sp_model )
_lowerCamelCase = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowerCamelCase__ )
}
_lowerCamelCase = {v: k for k, v in self.lang_code_to_id.items()}
_lowerCamelCase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
_lowerCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
_lowerCamelCase = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
_lowerCamelCase = src_lang if src_lang is not None else '''eng_Latn'''
_lowerCamelCase = self.lang_code_to_id[self._src_lang]
_lowerCamelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ):
_lowerCamelCase = self.__dict__.copy()
_lowerCamelCase = None
_lowerCamelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , lowerCamelCase__ ):
_lowerCamelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_lowerCamelCase = {}
_lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def snake_case__ ( self ):
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def snake_case__ ( self ):
return self._src_lang
@src_lang.setter
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ )
_lowerCamelCase = [1] * len(self.prefix_tokens )
_lowerCamelCase = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(lowerCamelCase__ )) + suffix_ones
return prefix_ones + ([0] * len(lowerCamelCase__ )) + ([0] * len(lowerCamelCase__ )) + suffix_ones
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
_lowerCamelCase = [self.sep_token_id]
_lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ):
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
_lowerCamelCase = src_lang
_lowerCamelCase = self(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ )
_lowerCamelCase = self.convert_tokens_to_ids(lowerCamelCase__ )
_lowerCamelCase = tgt_lang_id
return inputs
def snake_case__ ( self ):
_lowerCamelCase = {self.convert_ids_to_tokens(lowerCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case__ ( self , lowerCamelCase__ ):
return self.sp_model.encode(lowerCamelCase__ , out_type=lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_lowerCamelCase = self.sp_model.PieceToId(lowerCamelCase__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def snake_case__ ( self , lowerCamelCase__ ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = ''''''.join(lowerCamelCase__ ).replace(lowerCamelCase__ , ''' ''' ).strip()
return out_string
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
if not os.path.isdir(lowerCamelCase__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCamelCase = os.path.join(
lowerCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase__ , '''wb''' ) as fi:
_lowerCamelCase = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase__ )
return (out_vocab_file,)
    def prepare_seq2seq_batch(
        self, src_texts, src_lang="eng_Latn", tgt_texts=None, tgt_lang="fra_Latn", **kwargs
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
def snake_case__ ( self ):
return self.set_src_lang_special_tokens(self.src_lang )
def snake_case__ ( self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
_lowerCamelCase = []
_lowerCamelCase = [self.eos_token_id, self.cur_lang_code]
else:
_lowerCamelCase = [self.cur_lang_code]
_lowerCamelCase = [self.eos_token_id]
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = self.lang_code_to_id[lang]
if self.legacy_behaviour:
_lowerCamelCase = []
_lowerCamelCase = [self.eos_token_id, self.cur_lang_code]
else:
_lowerCamelCase = [self.cur_lang_code]
_lowerCamelCase = [self.eos_token_id]
| 623 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('''--make-reports''')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 623 | 1 |
"""simple docstring"""
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if inductance < 0:
raise ValueError('''Inductance cannot be negative''' )
if frequency < 0:
raise ValueError('''Frequency cannot be negative''' )
if reactance < 0:
raise ValueError('''Inductive reactance cannot be negative''' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('''Exactly one argument must be 0''' )
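# Worked example (values chosen for illustration): ind_reactance(0, 10_000, 50) returns
# {"inductance": 50 / (2 * pi * 10_000)} ≈ {"inductance": 7.9577e-04} henries.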
if __name__ == "__main__":
import doctest
doctest.testmod()
| 623 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 623 | 1 |
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
# Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
# for compatibility with later versions of NVIDIA Megatron-LM.
# The inverse operation is performed inside Megatron-LM to read checkpoints:
# https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
# If param is the weight tensor of the self-attention block, the returned tensor
# will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
return param
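# Worked example (hypothetical sizes): with num_heads=16, hidden_size=64 (per head) and
# num_splits=3, a fused QKV weight of shape [3072, 1024] saved by checkpoint version >= 2.0
# is viewed as [16, 3, 64, 1024], transposed to [3, 16, 64, 1024], then flattened back to
# [3072, 1024] with the Q, K and V rows now grouped contiguously.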
def convert_megatron_checkpoint(args, input_state_dict, config):
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))

        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
# pprint(config)
    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]
# The regex to extract layer names.
    layer_re = re.compile(r'''layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)''' )
# The simple map of names for "automated" rules.
    megatron_to_transformers = {
'''attention.dense''': '''.attn.c_proj.''',
'''self_attention.dense''': '''.attn.c_proj.''',
'''mlp.dense_h_to_4h''': '''.mlp.c_fc.''',
'''mlp.dense_4h_to_h''': '''.mlp.c_proj.''',
}
# Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)

        # Stop if that's not a layer
        if m is None:
            break

        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)

        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val

        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask

            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias

            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val

        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val

        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)

        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings
# It should be done!
return output_state_dict
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint", type=str, help="Path to the checkpoint file (.zip archive or direct .pt file)",
    )
    parser.add_argument(
        "--config_file", default="", type=str, help="An optional config json file describing the pre-trained model.",
    )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"

        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257,
            n_positions=1024,
            n_embd=1024,
            n_layer=24,
            n_head=16,
            n_inner=4096,
            activation_function=activation_function,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1e-5,
            initializer_range=0.02,
            summary_type="cls_index",
            summary_use_proj=True,
            summary_activation=None,
            summary_proj_to_labels=True,
            summary_first_dropout=0.1,
            scale_attn_weights=True,
            use_cache=True,
            bos_token_id=50256,
            eos_token_id=50256,
        )
    else:
        config = GPT2Config.from_json_file(args.config_file)

    config.architectures = ["GPT2LMHeadModel"]

    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 623 |
"""simple docstring"""
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=9_9 , lowerCamelCase__=1_3 , lowerCamelCase__=1_6 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=True , lowerCamelCase__=2 , lowerCamelCase__=3_2 , lowerCamelCase__=4 , lowerCamelCase__=4 , lowerCamelCase__=3_0 , lowerCamelCase__=0 , lowerCamelCase__=1 , lowerCamelCase__=2 , lowerCamelCase__=None , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = decoder_seq_length
# For common tests
_lowerCamelCase = self.decoder_seq_length
_lowerCamelCase = is_training
_lowerCamelCase = use_attention_mask
_lowerCamelCase = use_labels
_lowerCamelCase = vocab_size
_lowerCamelCase = d_model
_lowerCamelCase = d_model
_lowerCamelCase = decoder_layers
_lowerCamelCase = decoder_layers
_lowerCamelCase = decoder_ffn_dim
_lowerCamelCase = decoder_attention_heads
_lowerCamelCase = decoder_attention_heads
_lowerCamelCase = eos_token_id
_lowerCamelCase = bos_token_id
_lowerCamelCase = pad_token_id
_lowerCamelCase = decoder_start_token_id
_lowerCamelCase = use_cache
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = None
_lowerCamelCase = decoder_seq_length
_lowerCamelCase = 2
_lowerCamelCase = 1
def snake_case__ ( self ):
_lowerCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
_lowerCamelCase = None
if self.use_attention_mask:
_lowerCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
_lowerCamelCase = None
if self.use_labels:
_lowerCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
_lowerCamelCase = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ):
_lowerCamelCase = True
_lowerCamelCase = TrOCRDecoder(config=lowerCamelCase__ ).to(lowerCamelCase__ ).eval()
_lowerCamelCase = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
_lowerCamelCase = model(lowerCamelCase__ , use_cache=lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , use_cache=lowerCamelCase__ )
self.parent.assertTrue(len(lowerCamelCase__ ) == len(lowerCamelCase__ ) )
self.parent.assertTrue(len(lowerCamelCase__ ) == len(lowerCamelCase__ ) + 1 )
_lowerCamelCase = outputs['''past_key_values''']
# create hypothetical next token and extent to next_input_ids
_lowerCamelCase = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append to next input_ids and
_lowerCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
_lowerCamelCase = model(lowerCamelCase__ )['''last_hidden_state''']
_lowerCamelCase = model(lowerCamelCase__ , past_key_values=lowerCamelCase__ )['''last_hidden_state''']
# select random slice
_lowerCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowerCamelCase = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
_lowerCamelCase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 )
def snake_case__ ( self ):
_lowerCamelCase = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = config_and_inputs
_lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
lowercase__ : int = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
lowercase__ : List[str] = (TrOCRForCausalLM,) if is_torch_available() else ()
lowercase__ : Tuple = {'text-generation': TrOCRForCausalLM} if is_torch_available() else {}
lowercase__ : Dict = True
lowercase__ : Optional[Any] = False
def snake_case__ ( self ):
_lowerCamelCase = TrOCRStandaloneDecoderModelTester(self , is_training=lowerCamelCase__ )
_lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
self.config_tester.run_common_tests()
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*lowerCamelCase__ )
def snake_case__ ( self ):
return
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def snake_case__ ( self ):
pass
| 623 | 1 |
"""simple docstring"""
from typing import Any
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ ):
_lowerCamelCase = data
_lowerCamelCase = None
class lowerCamelCase_:
'''simple docstring'''
def __init__( self ):
_lowerCamelCase = None
def snake_case__ ( self ):
_lowerCamelCase = self.head
while temp is not None:
print(temp.data , end=''' ''' )
_lowerCamelCase = temp.next
print()
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = Node(lowerCamelCase__ )
_lowerCamelCase = self.head
_lowerCamelCase = new_node
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ):
if node_data_a == node_data_a:
return
else:
_lowerCamelCase = self.head
while node_a is not None and node_a.data != node_data_a:
_lowerCamelCase = node_a.next
_lowerCamelCase = self.head
while node_a is not None and node_a.data != node_data_a:
_lowerCamelCase = node_a.next
if node_a is None or node_a is None:
return
_lowerCamelCase , _lowerCamelCase = node_a.data, node_a.data
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[Any] = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('''After swapping''')
ll.print_list()
| 623 |
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
| 623 | 1 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=1_3 , lowerCamelCase__=2 , lowerCamelCase__=2_4 , lowerCamelCase__=1_6 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=3_2 , lowerCamelCase__=5 , lowerCamelCase__=4 , lowerCamelCase__=3_7 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=1_0 , lowerCamelCase__=0.0_2 , lowerCamelCase__=None , lowerCamelCase__=2 , lowerCamelCase__=2 , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = patch_size
_lowerCamelCase = max_length
_lowerCamelCase = num_mel_bins
_lowerCamelCase = is_training
_lowerCamelCase = use_labels
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = type_sequence_label_size
_lowerCamelCase = initializer_range
_lowerCamelCase = scope
_lowerCamelCase = frequency_stride
_lowerCamelCase = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
_lowerCamelCase = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
_lowerCamelCase = (self.max_length - self.patch_size) // self.time_stride + 1
_lowerCamelCase = frequency_out_dimension * time_out_dimension
_lowerCamelCase = num_patches + 2
def snake_case__ ( self ):
_lowerCamelCase = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
_lowerCamelCase = None
if self.use_labels:
_lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase = self.get_config()
return config, input_values, labels
def snake_case__ ( self ):
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = ASTModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_lowerCamelCase = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self ):
_lowerCamelCase = self.prepare_config_and_inputs()
(
(
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) ,
) = config_and_inputs
_lowerCamelCase = {'''input_values''': input_values}
return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
lowercase__ : Dict = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
lowercase__ : str = (
{'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel}
if is_torch_available()
else {}
)
lowercase__ : str = False
lowercase__ : List[str] = False
lowercase__ : List[str] = False
lowercase__ : Dict = False
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def snake_case__ ( self ):
_lowerCamelCase = ASTModelTester(self )
_lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=3_7 )
def snake_case__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''AST does not use inputs_embeds''' )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) )
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase = [*signature.parameters.keys()]
_lowerCamelCase = ['''input_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
@slow
def snake_case__ ( self ):
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase = ASTModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def prepare_audio():
_lowerCamelCase = hf_hub_download(
repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' )
_lowerCamelCase , _lowerCamelCase = torchaudio.load(lowercase_ )
return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@cached_property
def snake_case__ ( self ):
return (
ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' )
if is_torchaudio_available()
else None
)
@slow
def snake_case__ ( self ):
_lowerCamelCase = self.default_feature_extractor
_lowerCamelCase = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(lowerCamelCase__ )
_lowerCamelCase = self.default_feature_extractor
_lowerCamelCase , _lowerCamelCase = prepare_audio()
_lowerCamelCase = audio.squeeze().numpy()
_lowerCamelCase = feature_extractor(lowerCamelCase__ , sampling_rate=lowerCamelCase__ , return_tensors='''pt''' ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
_lowerCamelCase = model(**lowerCamelCase__ )
# verify the logits
_lowerCamelCase = torch.Size((1, 5_2_7) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
_lowerCamelCase = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 ) )
| 623 |
"""simple docstring"""
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=2 , lowerCamelCase__=5_6 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=9_9 , lowerCamelCase__=3_2 , lowerCamelCase__=2 , lowerCamelCase__=2 , lowerCamelCase__=7 , lowerCamelCase__="gelu_new" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=5_1_2 , lowerCamelCase__=1_6 , lowerCamelCase__=2 , lowerCamelCase__=0.0_2 , lowerCamelCase__=4 , lowerCamelCase__="block_sparse" , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=2 , lowerCamelCase__=3 , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = seq_length
_lowerCamelCase = is_training
_lowerCamelCase = use_attention_mask
_lowerCamelCase = use_token_type_ids
_lowerCamelCase = use_labels
_lowerCamelCase = vocab_size
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = type_vocab_size
_lowerCamelCase = type_sequence_label_size
_lowerCamelCase = initializer_range
_lowerCamelCase = num_choices
_lowerCamelCase = rescale_embeddings
_lowerCamelCase = attention_type
_lowerCamelCase = use_bias
_lowerCamelCase = block_size
_lowerCamelCase = num_random_blocks
def snake_case__ ( self ):
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase = None
if self.use_attention_mask:
_lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase = None
if self.use_token_type_ids:
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCamelCase = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def snake_case__ ( self ):
_lowerCamelCase = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = config_and_inputs
_lowerCamelCase = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''attention_mask''': attention_mask,
}
return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
'''simple docstring'''
lowercase__ : List[str] = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
lowercase__ : Any = False
lowercase__ : Optional[int] = False
def snake_case__ ( self ):
_lowerCamelCase = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self ):
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self ):
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self ):
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self ):
super().test_hidden_states_output()
@slow
def snake_case__ ( self ):
for model_class_name in self.all_model_classes:
_lowerCamelCase = model_class_name.from_pretrained('''google/bigbird-roberta-base''' )
self.assertIsNotNone(lowerCamelCase__ )
def snake_case__ ( self ):
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = model_class(lowerCamelCase__ )
@jax.jit
def model_jitted(lowerCamelCase__ , lowerCamelCase__=None , **lowerCamelCase__ ):
return model(input_ids=lowerCamelCase__ , attention_mask=lowerCamelCase__ , **lowerCamelCase__ )
with self.subTest('''JIT Enabled''' ):
_lowerCamelCase = model_jitted(**lowerCamelCase__ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
_lowerCamelCase = model_jitted(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) )
for jitted_output, output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
# `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in the PyTorch version
# an effort was made to return `attention_probs` (yet to be verified).
if name.startswith('''outputs.attentions''' ):
return
else:
super().check_pt_flax_outputs(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
| 623 | 1 |
"""simple docstring"""
def euclidean_gcd(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a
def euclidean_gcd_recursive(a: int, b: int) -> int:
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)
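# Quick sanity check of the two variants above; the numbers are my own example,
# not from the original file: (48, 18) -> (18, 12) -> (12, 6) -> (6, 0),
# so both functions return 6.
assert euclidean_gcd(48, 18) == 6
assert euclidean_gcd_recursive(48, 18) == 6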
def main() -> None:
    print(F"""euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}""")
    print(F"""euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}""")
    print(F"""euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}""")
    print(F"""euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}""")
    print(F"""euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}""")
    print(F"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}""")
    print(F"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}""")
    print(F"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}""")
    print(F"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}""")
    print(F"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}""")
if __name__ == "__main__":
    main()
| 623 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImgaImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
lowercase__ : Tuple = StableDiffusionXLImgaImgPipeline
lowercase__ : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
lowercase__ : int = PipelineTesterMixin.required_optional_params - {'latents'}
lowercase__ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowercase__ : Dict = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowercase__ : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def get_dummy_components(self):
torch.manual_seed(0 )
_lowerCamelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=lowerCamelCase__ , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=8_0 , cross_attention_dim=6_4 , )
_lowerCamelCase = EulerDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , )
torch.manual_seed(0 )
_lowerCamelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
_lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=3_2 , )
_lowerCamelCase = CLIPTextModel(lowerCamelCase__ )
_lowerCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=lowerCamelCase__ )
_lowerCamelCase = CLIPTextModelWithProjection(lowerCamelCase__ )
_lowerCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=lowerCamelCase__ )
_lowerCamelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''text_encoder_2''': text_encoder_a,
'''tokenizer_2''': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def get_dummy_inputs(self, device, seed=0):
_lowerCamelCase = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
_lowerCamelCase = image / 2 + 0.5
if str(lowerCamelCase__ ).startswith('''mps''' ):
_lowerCamelCase = torch.manual_seed(lowerCamelCase__ )
else:
_lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
_lowerCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 5.0,
'''output_type''': '''numpy''',
'''strength''': 0.7_5,
}
return inputs
def snake_case__ ( self ):
_lowerCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_lowerCamelCase = self.get_dummy_components()
_lowerCamelCase = StableDiffusionXLImgaImgPipeline(**lowerCamelCase__ )
_lowerCamelCase = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_lowerCamelCase = self.get_dummy_inputs(lowerCamelCase__ )
_lowerCamelCase = sd_pipe(**lowerCamelCase__ ).images
_lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
_lowerCamelCase = np.array([0.4_6_5_6, 0.4_8_4_0, 0.4_4_3_9, 0.6_6_9_8, 0.5_5_7_4, 0.4_5_2_4, 0.5_7_9_9, 0.5_9_4_3, 0.5_1_6_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case__ ( self ):
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def snake_case__ ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
_lowerCamelCase = self.get_dummy_components()
_lowerCamelCase = StableDiffusionXLImgaImgPipeline(**lowerCamelCase__ )
_lowerCamelCase = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
# forward without prompt embeds
_lowerCamelCase = self.get_dummy_inputs(lowerCamelCase__ )
_lowerCamelCase = 3 * ['''this is a negative prompt''']
_lowerCamelCase = negative_prompt
_lowerCamelCase = 3 * [inputs['''prompt''']]
_lowerCamelCase = sd_pipe(**lowerCamelCase__ )
_lowerCamelCase = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
_lowerCamelCase = self.get_dummy_inputs(lowerCamelCase__ )
_lowerCamelCase = 3 * ['''this is a negative prompt''']
_lowerCamelCase = 3 * [inputs.pop('''prompt''' )]
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = sd_pipe.encode_prompt(lowerCamelCase__ , negative_prompt=lowerCamelCase__ )
_lowerCamelCase = sd_pipe(
**lowerCamelCase__ , prompt_embeds=lowerCamelCase__ , negative_prompt_embeds=lowerCamelCase__ , pooled_prompt_embeds=lowerCamelCase__ , negative_pooled_prompt_embeds=lowerCamelCase__ , )
_lowerCamelCase = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__="cpu" , lowerCamelCase__=torch.floataa , lowerCamelCase__=0 ):
_lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
_lowerCamelCase = np.random.RandomState(lowerCamelCase__ ).standard_normal((1, 4, 6_4, 6_4) )
_lowerCamelCase = torch.from_numpy(lowerCamelCase__ ).to(device=lowerCamelCase__ , dtype=lowerCamelCase__ )
_lowerCamelCase = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def snake_case__ ( self ):
_lowerCamelCase = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''' )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_lowerCamelCase = self.get_inputs(lowerCamelCase__ )
_lowerCamelCase = pipe(**lowerCamelCase__ ).images
_lowerCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_lowerCamelCase = np.array([0.4_9_4_9_3, 0.4_7_8_9_6, 0.4_0_7_9_8, 0.5_4_2_1_4, 0.5_3_2_1_2, 0.4_8_2_0_2, 0.4_7_6_5_6, 0.4_6_3_2_9, 0.4_8_5_0_6] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
| 623 | 1 |
"""simple docstring"""
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)
    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))
def clamp_rect(rect, min, max):
    return (
        clamp(rect[0], min[0], max[0]),
        clamp(rect[1], min[1], max[1]),
        clamp(rect[2], min[0], max[0]),
        clamp(rect[3], min[1], max[1]),
    )
def add_overlap_rect(rect, overlap, image_size):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect
def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result
def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile
def next_divisible(n, d):
    divisor = n % d
    return n - divisor
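# A small worked example of the overlap logic above (my own numbers, not from
# the original file): growing the tile rect (256, 256, 512, 512) by a 32 px
# overlap inside a 768x768 image gives (224, 224, 544, 544), still in bounds.
assert add_overlap_rect((256, 256, 512, 512), 32, (768, 768)) == (224, 224, 544, 544)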
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
'''simple docstring'''
def __init__(self, vae, text_encoder, tokenizer, unet, low_res_scheduler, scheduler, max_noise_level=350):
    super().__init__(
        vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, max_noise_level=max_noise_level,
    )
def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
torch.manual_seed(0 )
_lowerCamelCase = (
min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
min(image.size[0] , (x + 1) * tile_size ),
min(image.size[1] , (y + 1) * tile_size ),
)
_lowerCamelCase = add_overlap_rect(lowerCamelCase__ , lowerCamelCase__ , image.size )
_lowerCamelCase = image.crop(lowerCamelCase__ )
_lowerCamelCase = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
_lowerCamelCase = translated_slice_x - (original_image_slice / 2)
_lowerCamelCase = max(0 , lowerCamelCase__ )
_lowerCamelCase = squeeze_tile(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = to_input.size
_lowerCamelCase = to_input.resize((tile_size, tile_size) , Image.BICUBIC )
_lowerCamelCase = super(StableDiffusionTiledUpscalePipeline , self ).__call__(image=lowerCamelCase__ , **lowerCamelCase__ ).images[0]
_lowerCamelCase = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
_lowerCamelCase = unsqueeze_tile(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
_lowerCamelCase = []
if x == 0:
remove_borders.append('''l''' )
elif crop_rect[2] == image.size[0]:
remove_borders.append('''r''' )
if y == 0:
remove_borders.append('''t''' )
elif crop_rect[3] == image.size[1]:
remove_borders.append('''b''' )
_lowerCamelCase = Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=lowerCamelCase__ ) , mode='''L''' , )
final_image.paste(
lowerCamelCase__ , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , lowerCamelCase__ )
@torch.no_grad()
def __call__(self, prompt, image, num_inference_steps=75, guidance_scale=9.0, noise_level=50, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, callback=None, callback_steps=1, tile_size=128, tile_border=32, original_image_slice=32):
    final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
    tcx = math.ceil(image.size[0] / tile_size)
    tcy = math.ceil(image.size[1] / tile_size)
    total_tile_count = tcx * tcy
    current_count = 0
    for y in range(tcy):
        for x in range(tcx):
self._process_tile(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , prompt=lowerCamelCase__ , num_inference_steps=lowerCamelCase__ , guidance_scale=lowerCamelCase__ , noise_level=lowerCamelCase__ , negative_prompt=lowerCamelCase__ , num_images_per_prompt=lowerCamelCase__ , eta=lowerCamelCase__ , generator=lowerCamelCase__ , latents=lowerCamelCase__ , )
current_count += 1
if callback is not None:
callback({'''progress''': current_count / total_tile_count, '''image''': final_image} )
return final_image
def main():
    # Run a demo
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")
    def callback(obj):
        print(F"""progress: {obj["progress"]:.4f}""")
        obj["image"].save("diffusers_library_progress.jpg")
    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")
if __name__ == "__main__":
    main()
| 623 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_xlm"] = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_xlm"] = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
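# Note on the pattern above (explanatory comment, not in the original file):
# registering a _LazyModule keeps plain `import` of this package cheap; the
# torch- or TF-specific submodules are only imported the first time one of
# their exported names (e.g. XLMModel) is actually accessed.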
| 623 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["feature_extraction_mobilevit"] = ['''MobileViTFeatureExtractor''']
_import_structure["image_processing_mobilevit"] = ['''MobileViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_mobilevit"] = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_mobilevit"] = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 623 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
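# Example of the helper above (my own illustration): floats_list((2, 3))
# returns a 2x3 nested list of floats in [0, scale), drawn from the shared
# module-level `global_rng` unless an explicit rng is passed in.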
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=7 , lowerCamelCase__=4_0_0 , lowerCamelCase__=2_0_0_0 , lowerCamelCase__=1_0 , lowerCamelCase__=1_6_0 , lowerCamelCase__=8 , lowerCamelCase__=0.0 , lowerCamelCase__=4_0_0_0 , lowerCamelCase__=False , lowerCamelCase__=True , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = min_seq_length
_lowerCamelCase = max_seq_length
_lowerCamelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_lowerCamelCase = padding_value
_lowerCamelCase = sampling_rate
_lowerCamelCase = return_attention_mask
_lowerCamelCase = do_normalize
_lowerCamelCase = feature_size
_lowerCamelCase = chunk_length
_lowerCamelCase = hop_length
def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def prepare_inputs_for_common(self, equal_length=False, numpify=False):
def _flatten(lowerCamelCase__ ):
return list(itertools.chain(*lowerCamelCase__ ) )
if equal_length:
_lowerCamelCase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_lowerCamelCase = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_lowerCamelCase = [np.asarray(lowerCamelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
'''simple docstring'''
feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None
def setUp(self):
    self.feat_extract_tester = WhisperFeatureExtractionTester(self)
def snake_case__ ( self ):
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase = feat_extract_first.save_pretrained(lowerCamelCase__ )[0]
check_json_file_has_correct_format(lowerCamelCase__ )
_lowerCamelCase = self.feature_extraction_class.from_pretrained(lowerCamelCase__ )
_lowerCamelCase = feat_extract_first.to_dict()
_lowerCamelCase = feat_extract_second.to_dict()
_lowerCamelCase = feat_extract_first.mel_filters
_lowerCamelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase = os.path.join(lowerCamelCase__ , '''feat_extract.json''' )
feat_extract_first.to_json_file(lowerCamelCase__ )
_lowerCamelCase = self.feature_extraction_class.from_json_file(lowerCamelCase__ )
_lowerCamelCase = feat_extract_first.to_dict()
_lowerCamelCase = feat_extract_second.to_dict()
_lowerCamelCase = feat_extract_first.mel_filters
_lowerCamelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
# Tests that all calls wrap to encode_plus and batch_encode_plus
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_lowerCamelCase = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs]
# Test feature size
_lowerCamelCase = feature_extractor(lowerCamelCase__ , padding='''max_length''' , return_tensors='''np''' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_lowerCamelCase = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features
_lowerCamelCase = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
# Test batched
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_lowerCamelCase = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
_lowerCamelCase = np.asarray(lowerCamelCase__ )
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
# Test truncation required
_lowerCamelCase = [floats_list((1, x) )[0] for x in range(2_0_0 , (feature_extractor.n_samples + 5_0_0) , 2_0_0 )]
_lowerCamelCase = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs]
_lowerCamelCase = [x[: feature_extractor.n_samples] for x in speech_inputs]
_lowerCamelCase = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs_truncated]
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
def snake_case__ ( self ):
import torch
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCamelCase = np.random.rand(1_0_0 , 3_2 ).astype(np.floataa )
_lowerCamelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_lowerCamelCase = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
_lowerCamelCase = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def _load_datasamples(self, num_samples):
_lowerCamelCase = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
_lowerCamelCase = ds.sort('''id''' ).select(range(lowerCamelCase__ ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def snake_case__ ( self ):
# fmt: off
_lowerCamelCase = torch.tensor(
[
0.1_1_9_3, -0.0_9_4_6, -0.1_0_9_8, -0.0_1_9_6, 0.0_2_2_5, -0.0_6_9_0, -0.1_7_3_6, 0.0_9_5_1,
0.0_9_7_1, -0.0_8_1_7, -0.0_7_0_2, 0.0_1_6_2, 0.0_2_6_0, 0.0_0_1_7, -0.0_1_9_2, -0.1_6_7_8,
0.0_7_0_9, -0.1_8_6_7, -0.0_6_5_5, -0.0_2_7_4, -0.0_2_3_4, -0.1_8_8_4, -0.0_5_1_6, -0.0_5_5_4,
-0.0_2_7_4, -0.1_4_2_5, -0.1_4_2_3, 0.0_8_3_7, 0.0_3_7_7, -0.0_8_5_4
] )
# fmt: on
_lowerCamelCase = self._load_datasamples(1 )
_lowerCamelCase = WhisperFeatureExtractor()
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''pt''' ).input_features
self.assertEqual(input_features.shape , (1, 8_0, 3_0_0_0) )
self.assertTrue(torch.allclose(input_features[0, 0, :3_0] , lowerCamelCase__ , atol=1e-4 ) )
def snake_case__ ( self ):
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCamelCase = self._load_datasamples(1 )[0]
_lowerCamelCase = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5_5_3_5 # Rescale to [0, 65535] to show issue
_lowerCamelCase = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=lowerCamelCase__ )[0]
self.assertTrue(np.all(np.mean(lowerCamelCase__ ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCamelCase__ ) - 1 ) < 1e-3 ) )
| 623 | 1 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
# Load configuration defined in the metadata file
with open(lowercase_ ) as metadata_file:
_lowerCamelCase = json.load(lowercase_ )
_lowerCamelCase = LukeConfig(use_entity_aware_attention=lowercase_ , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
_lowerCamelCase = torch.load(lowercase_ , map_location='''cpu''' )
# Load the entity vocab file
_lowerCamelCase = load_entity_vocab(lowercase_ )
_lowerCamelCase = RobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
_lowerCamelCase = AddedToken('''<ent>''' , lstrip=lowercase_ , rstrip=lowercase_ )
_lowerCamelCase = AddedToken('''<ent2>''' , lstrip=lowercase_ , rstrip=lowercase_ )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(lowercase_ )
with open(os.path.join(lowercase_ , LukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(lowercase_ , lowercase_ )
_lowerCamelCase = LukeTokenizer.from_pretrained(lowercase_ )
# Initialize the embeddings of the special tokens
_lowerCamelCase = state_dict['''embeddings.word_embeddings.weight''']
_lowerCamelCase = word_emb[tokenizer.convert_tokens_to_ids(['''@'''] )[0]].unsqueeze(0 )
_lowerCamelCase = word_emb[tokenizer.convert_tokens_to_ids(['''#'''] )[0]].unsqueeze(0 )
_lowerCamelCase = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_lowerCamelCase = F"""encoder.layer.{layer_index}.attention.self."""
_lowerCamelCase = state_dict[prefix + matrix_name]
_lowerCamelCase = state_dict[prefix + matrix_name]
_lowerCamelCase = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_lowerCamelCase = state_dict['''entity_embeddings.entity_embeddings.weight''']
_lowerCamelCase = entity_emb[entity_vocab['''[MASK]''']]
_lowerCamelCase = LukeModel(config=lowercase_ ).eval()
_lowerCamelCase , _lowerCamelCase = model.load_state_dict(lowercase_ , strict=lowercase_ )
if not (len(lowercase_ ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(F"""Missing keys {", ".join(lowercase_ )}. Expected only missing embeddings.position_ids""" )
if not (all(key.startswith('''entity_predictions''' ) or key.startswith('''lm_head''' ) for key in unexpected_keys )):
raise ValueError(
'''Unexpected keys'''
F""" {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}""" )
# Check outputs
_lowerCamelCase = LukeTokenizer.from_pretrained(lowercase_ , task='''entity_classification''' )
_lowerCamelCase = (
'''Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'''
''' new world number one avoid a humiliating second- round exit at Wimbledon .'''
)
_lowerCamelCase = (39, 42)
_lowerCamelCase = tokenizer(lowercase_ , entity_spans=[span] , add_prefix_space=lowercase_ , return_tensors='''pt''' )
_lowerCamelCase = model(**lowercase_ )
# Verify word hidden states
if model_size == "large":
_lowerCamelCase = torch.Size((1, 42, 10_24) )
_lowerCamelCase = torch.tensor(
[[0.0_1_3_3, 0.0_8_6_5, 0.0_0_9_5], [0.3_0_9_3, -0.2_5_7_6, -0.7_4_1_8], [-0.1_7_2_0, -0.2_1_1_7, -0.2_8_6_9]] )
else: # base
_lowerCamelCase = torch.Size((1, 42, 7_68) )
_lowerCamelCase = torch.tensor([[0.0_0_3_7, 0.1_3_6_8, -0.0_0_9_1], [0.1_0_9_9, 0.3_3_2_9, -0.1_0_9_5], [0.0_7_6_5, 0.5_3_3_5, 0.1_1_7_9]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase_ , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
_lowerCamelCase = torch.Size((1, 1, 10_24) )
_lowerCamelCase = torch.tensor([[0.0_4_6_6, -0.0_1_0_6, -0.0_1_7_9]] )
else: # base
_lowerCamelCase = torch.Size((1, 1, 7_68) )
_lowerCamelCase = torch.tensor([[0.1_4_5_7, 0.1_0_4_4, 0.0_1_7_4]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowercase_ , atol=1e-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(lowercase_ ) )
model.save_pretrained(lowercase_ )
def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index
    return entity_vocab
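# Per the split("\t") above, each line of the entity vocab file carries two
# tab-separated fields; only the first (the entity title) is kept, mapped to
# its zero-based line index.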
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 623 |
"""simple docstring"""
def abbr(a: str, b: str) -> bool:
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
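# Sanity check of the DP above; the strings are my own example, not from the
# original file: "daBcd" can become "ABC" by capitalizing a, b, c and deleting
# the remaining lowercase letters, while "dBcd" cannot.
assert abbr("daBcd", "ABC") is True
assert abbr("dBcd", "ABC") is False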
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 623 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(F"""Building PyTorch model from configuration: {config}""")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--mobilebert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained MobileBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 623 |
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.array) -> np.array:
    return 1 / (1 + np.exp(-vector))
def gaussian_error_linear_unit(vector: np.array) -> np.array:
    return vector * sigmoid(1.702 * vector)
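# Behaviour check (my own numbers, not from the original file): the 1.702
# constant makes x * sigmoid(1.702 * x) a close approximation of GELU, so the
# output is ~0 at x = 0, ~x for large positive x, and ~0 for large negative x:
# gaussian_error_linear_unit(np.array([-10.0, 0.0, 10.0]))
# -> approximately [-4.1e-07, 0.0, 10.0]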
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 623 | 1 |
"""simple docstring"""
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__ = True , lowerCamelCase__ = None , lowerCamelCase__ = 3_2 , lowerCamelCase__ = True , lowerCamelCase__ = 1 / 2_5_5 , lowerCamelCase__ = True , lowerCamelCase__ = True , lowerCamelCase__ = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , lowerCamelCase__ = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , lowerCamelCase__ = True , lowerCamelCase__=7 , lowerCamelCase__=3_0 , lowerCamelCase__=4_0_0 , lowerCamelCase__=3 , ):
_lowerCamelCase = parent
_lowerCamelCase = do_resize
_lowerCamelCase = size if size is not None else {'''shortest_edge''': 2_8_8}
_lowerCamelCase = size_divisor
_lowerCamelCase = do_rescale
_lowerCamelCase = rescale_factor
_lowerCamelCase = do_normalize
_lowerCamelCase = do_center_crop
_lowerCamelCase = image_mean
_lowerCamelCase = image_std
_lowerCamelCase = do_pad
_lowerCamelCase = batch_size
_lowerCamelCase = num_channels
_lowerCamelCase = min_resolution
_lowerCamelCase = max_resolution
def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def get_expected_values(self, image_inputs, batched=False):
if not batched:
_lowerCamelCase = self.size['''shortest_edge''']
_lowerCamelCase = image_inputs[0]
if isinstance(lowerCamelCase__ , Image.Image ):
_lowerCamelCase , _lowerCamelCase = image.size
else:
_lowerCamelCase , _lowerCamelCase = image.shape[1], image.shape[2]
_lowerCamelCase = size / min(lowerCamelCase__ , lowerCamelCase__ )
if h < w:
_lowerCamelCase , _lowerCamelCase = size, scale * w
else:
_lowerCamelCase , _lowerCamelCase = scale * h, size
_lowerCamelCase = int((1_3_3_3 / 8_0_0) * size )
if max(lowerCamelCase__ , lowerCamelCase__ ) > max_size:
_lowerCamelCase = max_size / max(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = newh * scale
_lowerCamelCase = neww * scale
_lowerCamelCase , _lowerCamelCase = int(newh + 0.5 ), int(neww + 0.5 )
_lowerCamelCase , _lowerCamelCase = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
_lowerCamelCase = []
for image in image_inputs:
_lowerCamelCase , _lowerCamelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_lowerCamelCase = max(lowerCamelCase__ , key=lambda lowerCamelCase__ : item[0] )[0]
_lowerCamelCase = max(lowerCamelCase__ , key=lambda lowerCamelCase__ : item[1] )[1]
return expected_height, expected_width
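# Worked example of the sizing logic above (my own numbers): a 600x400 (w x h)
# PIL image with shortest_edge = 288 gets scale = 288 / 400, i.e. (288, 432);
# 432 is below the int(1333 / 800 * 288) = 479 cap, and flooring both sides to
# the size_divisor of 32 yields an expected (height, width) of (288, 416).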
@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
'''simple docstring'''
image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None
def setUp(self):
    self.image_processor_tester = BridgeTowerImageProcessingTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case__ ( self ):
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase__ , '''image_mean''' ) )
self.assertTrue(hasattr(lowerCamelCase__ , '''image_std''' ) )
self.assertTrue(hasattr(lowerCamelCase__ , '''do_normalize''' ) )
self.assertTrue(hasattr(lowerCamelCase__ , '''do_resize''' ) )
self.assertTrue(hasattr(lowerCamelCase__ , '''size''' ) )
self.assertTrue(hasattr(lowerCamelCase__ , '''size_divisor''' ) )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
# Initialize image processor
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , Image.Image )
# Test not batched input
_lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(lowerCamelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase = image_processing(lowerCamelCase__ , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(lowerCamelCase__ , batched=lowerCamelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case__ ( self ):
# Initialize image processor
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , numpify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , np.ndarray )
# Test not batched input
_lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(lowerCamelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase = image_processing(lowerCamelCase__ , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(lowerCamelCase__ , batched=lowerCamelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case__ ( self ):
# Initialize image processor
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , torchify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , torch.Tensor )
# Test not batched input
_lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(lowerCamelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase = image_processing(lowerCamelCase__ , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(lowerCamelCase__ , batched=lowerCamelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 623 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_vision_encoder_decoder"] = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_vision_encoder_decoder"] = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_vision_encoder_decoder"] = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 623 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/mbart-large-50-one-to-many-mmt''': (
'''https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/mbart-large-50-one-to-many-mmt''': 1_0_2_4,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''', '''af_ZA''', '''az_AZ''', '''bn_IN''', '''fa_IR''', '''he_IL''', '''hr_HR''', '''id_ID''', '''ka_GE''', '''km_KH''', '''mk_MK''', '''ml_IN''', '''mn_MN''', '''mr_IN''', '''pl_PL''', '''ps_AF''', '''pt_XX''', '''sv_SE''', '''sw_KE''', '''ta_IN''', '''te_IN''', '''th_TH''', '''tl_XX''', '''uk_UA''', '''ur_PK''', '''xh_ZA''', '''gl_ES''', '''sl_SI''']
class MBart50Tokenizer(PreTrainedTokenizer):
'''simple docstring'''
vocab_files_names = VOCAB_FILES_NAMES
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
model_input_names = ['''input_ids''', '''attention_mask''']
prefix_tokens: List[int] = []
suffix_tokens: List[int] = []
def __init__( self , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__="</s>" , lowerCamelCase__="</s>" , lowerCamelCase__="<s>" , lowerCamelCase__="<unk>" , lowerCamelCase__="<pad>" , lowerCamelCase__="<mask>" , lowerCamelCase__ = None , **lowerCamelCase__ , ):
# Mask token behaves like a normal word, i.e. includes the space before it
_lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token
_lowerCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCamelCase = kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=lowerCamelCase__ , tgt_lang=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase__ , )
_lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCamelCase__ ) )
_lowerCamelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 tokens
_lowerCamelCase = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_lowerCamelCase = 1
_lowerCamelCase = len(self.sp_model )
_lowerCamelCase = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowerCamelCase__ )
}
_lowerCamelCase = {v: k for k, v in self.lang_code_to_id.items()}
_lowerCamelCase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
_lowerCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
_lowerCamelCase = src_lang if src_lang is not None else '''en_XX'''
_lowerCamelCase = self.lang_code_to_id[self._src_lang]
_lowerCamelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def snake_case__ ( self ):
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def snake_case__ ( self ):
return self._src_lang
@src_lang.setter
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ):
_lowerCamelCase = self.__dict__.copy()
_lowerCamelCase = None
return state
def __setstate__( self , lowerCamelCase__ ):
_lowerCamelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_lowerCamelCase = {}
_lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case__ ( self ):
_lowerCamelCase = {self.convert_ids_to_tokens(lowerCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case__ ( self , lowerCamelCase__ ):
return self.sp_model.encode(lowerCamelCase__ , out_type=lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_lowerCamelCase = self.sp_model.PieceToId(lowerCamelCase__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def snake_case__ ( self , lowerCamelCase__ ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
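    # Example of the offset bookkeeping above (my own illustration): with
    # fairseq_offset = 1, the SP piece id 3 (",") maps to fairseq id 4,
    # matching the alignment table in __init__; an SP id of 0 (<unk>) falls
    # through to unk_token_id instead of being offset.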
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = []
_lowerCamelCase = ''''''
_lowerCamelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCamelCase__ ) + token
_lowerCamelCase = True
_lowerCamelCase = []
else:
current_sub_tokens.append(lowerCamelCase__ )
_lowerCamelCase = False
out_string += self.sp_model.decode(lowerCamelCase__ )
return out_string.strip()
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
if not os.path.isdir(lowerCamelCase__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCamelCase = os.path.join(
lowerCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase__ , '''wb''' ) as fi:
_lowerCamelCase = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase__ )
return (out_vocab_file,)
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ )
_lowerCamelCase = [1] * len(self.prefix_tokens )
_lowerCamelCase = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(lowerCamelCase__ )) + suffix_ones
return prefix_ones + ([0] * len(lowerCamelCase__ )) + ([0] * len(lowerCamelCase__ )) + suffix_ones
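# Illustrative mask for a single sequence of n ids, given the one-element
# prefix ([lang_code]) and suffix ([eos]) set in set_src_lang_special_tokens
# below: [1] + [0] * n + [1].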
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
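# Sketch of the resulting layout (special tokens as set in
# set_src_lang_special_tokens below): [src_lang_code] <ids of sequence A> </s>,
# and for a pair: [src_lang_code] <ids A> <ids B> </s>.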
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ):
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
_lowerCamelCase = src_lang
_lowerCamelCase = self(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ )
_lowerCamelCase = self.convert_tokens_to_ids(lowerCamelCase__ )
_lowerCamelCase = tgt_lang_id
return inputs
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = "en_XX" , lowerCamelCase__ = None , lowerCamelCase__ = "ro_RO" , **lowerCamelCase__ , ):
_lowerCamelCase = src_lang
_lowerCamelCase = tgt_lang
return super().prepare_seqaseq_batch(lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ )
def snake_case__ ( self ):
return self.set_src_lang_special_tokens(self.src_lang )
def snake_case__ ( self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = self.lang_code_to_id[src_lang]
_lowerCamelCase = [self.cur_lang_code_id]
_lowerCamelCase = [self.eos_token_id]
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = self.lang_code_to_id[tgt_lang]
_lowerCamelCase = [self.cur_lang_code_id]
_lowerCamelCase = [self.eos_token_id]
| 623 |
"""simple docstring"""
from __future__ import annotations
from math import pow, sqrt
def lowerCAmelCase_( lowercase_ : float , lowercase_ : float , lowercase_ : float ) -> dict[str, float]:
if (resistance, reactance, impedance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if resistance == 0:
return {"resistance": sqrt(pow(lowercase_ , 2 ) - pow(lowercase_ , 2 ) )}
elif reactance == 0:
return {"reactance": sqrt(pow(lowercase_ , 2 ) - pow(lowercase_ , 2 ) )}
elif impedance == 0:
return {"impedance": sqrt(pow(lowercase_ , 2 ) + pow(lowercase_ , 2 ) )}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
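# Minimal usage sketch (values chosen for illustration, not from the module):
# with resistance 0, reactance 1500 and impedance 2500, |Z|^2 = R^2 + X^2
# gives R = sqrt(2500**2 - 1500**2) = 2000.
print(lowerCAmelCase_(0, 1500, 2500)) # {'resistance': 2000.0}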
| 623 | 1 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : int = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''adapter_layer''': '''encoder.layers.*.adapter_layer''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
'''pooling_layer.linear''': '''projector''',
'''pooling_layer.projection''': '''classifier''',
}
__SCREAMING_SNAKE_CASE : List[Any] = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''projector''',
'''classifier''',
]
def lowerCAmelCase_( lowercase_ : List[str] ) -> Tuple:
_lowerCamelCase = {}
with open(lowercase_ , '''r''' ) as file:
for line_number, line in enumerate(lowercase_ ):
_lowerCamelCase = line.strip()
if line:
_lowerCamelCase = line.split()
_lowerCamelCase = line_number
_lowerCamelCase = words[0]
_lowerCamelCase = value
return result
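# Hedged sketch of the expected input format (one label per line): a file
# containing "angry\nhappy\n" would yield {0: "angry", 1: "happy"}, which is
# used below as the id2label mapping for the sequence-classification head.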
def lowerCAmelCase_( lowercase_ : Dict , lowercase_ : str , lowercase_ : Tuple , lowercase_ : List[Any] , lowercase_ : Union[str, Any] ) -> Optional[int]:
for attribute in key.split('''.''' ):
_lowerCamelCase = getattr(lowercase_ , lowercase_ )
_lowerCamelCase = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowercase_ ):
_lowerCamelCase = PARAM_MAPPING[full_name.split('''.''' )[-1]]
_lowerCamelCase = '''param'''
if weight_type is not None and weight_type != "param":
_lowerCamelCase = getattr(lowercase_ , lowercase_ ).shape
elif weight_type is not None and weight_type == "param":
_lowerCamelCase = hf_pointer
for attribute in hf_param_name.split('''.''' ):
_lowerCamelCase = getattr(lowercase_ , lowercase_ )
_lowerCamelCase = shape_pointer.shape
# let's reduce dimension
_lowerCamelCase = value[0]
else:
_lowerCamelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
_lowerCamelCase = value
elif weight_type == "weight_g":
_lowerCamelCase = value
elif weight_type == "weight_v":
_lowerCamelCase = value
elif weight_type == "bias":
_lowerCamelCase = value
elif weight_type == "param":
for attribute in hf_param_name.split('''.''' ):
_lowerCamelCase = getattr(lowercase_ , lowercase_ )
_lowerCamelCase = value
else:
_lowerCamelCase = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def lowerCAmelCase_( lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : List[str] ) -> Dict:
_lowerCamelCase = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowercase_ ):
_lowerCamelCase = PARAM_MAPPING[full_name.split('''.''' )[-1]]
_lowerCamelCase = '''param'''
if weight_type is not None and weight_type != "param":
_lowerCamelCase = '''.'''.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
_lowerCamelCase = '''.'''.join([key, hf_param_name] )
else:
_lowerCamelCase = key
_lowerCamelCase = value if '''lm_head''' in full_key else value[0]
__SCREAMING_SNAKE_CASE : Optional[int] = {
'''W_a''': '''linear_1.weight''',
'''W_b''': '''linear_2.weight''',
'''b_a''': '''linear_1.bias''',
'''b_b''': '''linear_2.bias''',
'''ln_W''': '''norm.weight''',
'''ln_b''': '''norm.bias''',
}
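# Example of how this map is consumed below: a fairseq adapter parameter whose
# full name ends in "W_a" is stored under the HF attribute "linear_1.weight",
# and its weight_type becomes "param" in set_recursively/rename_dict.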
def lowerCAmelCase_( lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : Any=None , lowercase_ : Optional[int]=None ) -> Dict:
_lowerCamelCase = False
for key, mapped_key in MAPPING.items():
_lowerCamelCase = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
_lowerCamelCase = True
if "*" in mapped_key:
_lowerCamelCase = name.split(lowercase_ )[0].split('''.''' )[-2]
_lowerCamelCase = mapped_key.replace('''*''' , lowercase_ )
if "weight_g" in name:
_lowerCamelCase = '''weight_g'''
elif "weight_v" in name:
_lowerCamelCase = '''weight_v'''
elif "bias" in name:
_lowerCamelCase = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_lowerCamelCase = '''weight'''
else:
_lowerCamelCase = None
if hf_dict is not None:
rename_dict(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
else:
set_recursively(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
return is_used
def lowerCAmelCase_( lowercase_ : Optional[Any] , lowercase_ : Tuple , lowercase_ : Tuple ) -> Optional[int]:
_lowerCamelCase = []
_lowerCamelCase = fairseq_model.state_dict()
_lowerCamelCase = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
_lowerCamelCase = False
if "conv_layers" in name:
load_conv_layer(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , hf_model.config.feat_extract_norm == '''group''' , )
_lowerCamelCase = True
else:
_lowerCamelCase = load_wavaveca_layer(lowercase_ , lowercase_ , lowercase_ )
if not is_used:
unused_weights.append(lowercase_ )
logger.warning(F"""Unused weights: {unused_weights}""" )
def lowerCAmelCase_( lowercase_ : Optional[Any] , lowercase_ : List[Any] , lowercase_ : str , lowercase_ : List[Any] , lowercase_ : List[str] ) -> List[Any]:
_lowerCamelCase = full_name.split('''conv_layers.''' )[-1]
_lowerCamelCase = name.split('''.''' )
_lowerCamelCase = int(items[0] )
_lowerCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
_lowerCamelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
_lowerCamelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
_lowerCamelCase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
_lowerCamelCase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowercase_ )
@torch.no_grad()
def lowerCAmelCase_( lowercase_ : int , lowercase_ : Optional[int] , lowercase_ : int=None , lowercase_ : int=None , lowercase_ : List[Any]=True , lowercase_ : Optional[Any]=False ) -> Any:
if config_path is not None:
_lowerCamelCase = WavaVecaConfig.from_pretrained(lowercase_ )
else:
_lowerCamelCase = WavaVecaConfig()
if is_seq_class:
_lowerCamelCase = read_txt_into_dict(lowercase_ )
_lowerCamelCase = idalabel
_lowerCamelCase = WavaVecaForSequenceClassification(lowercase_ )
_lowerCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=lowercase_ , return_attention_mask=lowercase_ , )
feature_extractor.save_pretrained(lowercase_ )
elif is_finetuned:
if dict_path:
_lowerCamelCase = Dictionary.load(lowercase_ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_lowerCamelCase = target_dict.pad_index
_lowerCamelCase = target_dict.bos_index
_lowerCamelCase = target_dict.eos_index
_lowerCamelCase = len(target_dict.symbols )
_lowerCamelCase = os.path.join(lowercase_ , '''vocab.json''' )
if not os.path.isdir(lowercase_ ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(lowercase_ ) )
return
os.makedirs(lowercase_ , exist_ok=lowercase_ )
_lowerCamelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
_lowerCamelCase = 0
_lowerCamelCase = 1
with open(lowercase_ , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(lowercase_ , lowercase_ )
_lowerCamelCase = WavaVecaCTCTokenizer(
lowercase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=lowercase_ , )
_lowerCamelCase = True if config.feat_extract_norm == '''layer''' else False
_lowerCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=lowercase_ , return_attention_mask=lowercase_ , )
_lowerCamelCase = WavaVecaProcessor(feature_extractor=lowercase_ , tokenizer=lowercase_ )
processor.save_pretrained(lowercase_ )
_lowerCamelCase = WavaVecaForCTC(lowercase_ )
else:
_lowerCamelCase = WavaVecaForPreTraining(lowercase_ )
if is_finetuned or is_seq_class:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
_lowerCamelCase = argparse.Namespace(task='''audio_pretraining''' )
_lowerCamelCase = fairseq.tasks.setup_task(lowercase_ )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowercase_ )
_lowerCamelCase = model[0].eval()
recursively_load_weights(lowercase_ , lowercase_ , not is_finetuned )
hf_wavavec.save_pretrained(lowercase_ )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
parser.add_argument(
'''--is_seq_class''',
action='''store_true''',
help='''Whether the model to convert is a fine-tuned sequence classification model or not''',
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
__SCREAMING_SNAKE_CASE : Optional[int] = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
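# Example invocation (script name and paths are placeholders, not from the repo):
# python convert_wav2vec2_checkpoint.py --checkpoint_path wav2vec_small.pt \
#     --pytorch_dump_folder_path ./wav2vec2-base --not_finetuned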
| 623 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
def lowerCAmelCase_( lowercase_ : list[Any] ) -> None:
create_state_space_tree(lowercase_ , [] , 0 )
def lowerCAmelCase_( lowercase_ : list[Any] , lowercase_ : list[Any] , lowercase_ : int ) -> None:
if index == len(lowercase_ ):
print(lowercase_ )
return
create_state_space_tree(lowercase_ , lowercase_ , index + 1 )
current_subsequence.append(sequence[index] )
create_state_space_tree(lowercase_ , lowercase_ , index + 1 )
current_subsequence.pop()
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['''A''', '''B''', '''C'''])
generate_all_subsequences(seq)
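# Expected print order for ['A', 'B', 'C'] with this exclude-then-include
# recursion: [], ['C'], ['B'], ['B', 'C'], ['A'], ['A', 'C'], ['A', 'B'],
# ['A', 'B', 'C'].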
| 623 | 1 |
"""simple docstring"""
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def lowerCAmelCase_( ) -> str:
raise RuntimeError('''CUDA out of memory.''' )
class lowerCamelCase_( nn.Module ):
'''simple docstring'''
def __init__( self ):
super().__init__()
_lowerCamelCase = nn.Linear(3 , 4 )
_lowerCamelCase = nn.BatchNormad(4 )
_lowerCamelCase = nn.Linear(4 , 5 )
def snake_case__ ( self , lowerCamelCase__ ):
return self.lineara(self.batchnorm(self.lineara(lowerCamelCase__ ) ) )
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self ):
_lowerCamelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(lowerCamelCase__ ):
nonlocal batch_sizes
batch_sizes.append(lowerCamelCase__ )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(lowerCamelCase__ , [1_2_8, 6_4, 3_2, 1_6, 8] )
def snake_case__ ( self ):
_lowerCamelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(lowerCamelCase__ , lowerCamelCase__ ):
nonlocal batch_sizes
batch_sizes.append(lowerCamelCase__ )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
_lowerCamelCase , _lowerCamelCase = mock_training_loop_function('''hello''' )
self.assertListEqual(lowerCamelCase__ , [1_2_8, 6_4, 3_2, 1_6, 8] )
self.assertListEqual([bs, arga] , [8, '''hello'''] )
def snake_case__ ( self ):
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(lowerCamelCase__ ):
pass
with self.assertRaises(lowerCamelCase__ ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def snake_case__ ( self ):
@find_executable_batch_size(starting_batch_size=1_6 )
def mock_training_loop_function(lowerCamelCase__ ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(lowerCamelCase__ ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def snake_case__ ( self ):
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
if batch_size != 8:
raise_fake_out_of_memory()
with self.assertRaises(lowerCamelCase__ ) as cm:
mock_training_loop_function(1_2_8 , '''hello''' , '''world''' )
self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] )
self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] )
def snake_case__ ( self ):
@find_executable_batch_size(starting_batch_size=1_6 )
def mock_training_loop_function(lowerCamelCase__ ):
raise ValueError('''Oops, we had an error!''' )
with self.assertRaises(lowerCamelCase__ ) as cm:
mock_training_loop_function()
self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] )
@require_cuda
def snake_case__ ( self ):
_lowerCamelCase = torch.cuda.memory_allocated()
_lowerCamelCase = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , lowerCamelCase__ )
_lowerCamelCase = release_memory(lowerCamelCase__ )
self.assertEqual(torch.cuda.memory_allocated() , lowerCamelCase__ )
| 623 |
"""simple docstring"""
import warnings
from .generation import TFGenerationMixin
class lowerCamelCase_( A__ ):
'''simple docstring'''
warnings.warn(
'Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will '
'be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.', A__, )
| 623 | 1 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
__SCREAMING_SNAKE_CASE : Dict = random.Random()
def lowerCAmelCase_( lowercase_ : Dict , lowercase_ : int=1.0 , lowercase_ : str=None , lowercase_ : Optional[int]=None ) -> Any:
if rng is None:
_lowerCamelCase = global_rng
_lowerCamelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
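# Usage sketch: floats_list((2, 4)) returns a 2x4 nested list of floats in
# [0.0, scale), drawn from the module-level global_rng unless an rng is passed.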
@require_torch
@require_torchaudio
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=7 , lowerCamelCase__=4_0_0 , lowerCamelCase__=2_0_0_0 , lowerCamelCase__=1_0 , lowerCamelCase__=1_6_0 , lowerCamelCase__=8 , lowerCamelCase__=0.0 , lowerCamelCase__=4_0_0_0 , lowerCamelCase__=False , lowerCamelCase__=True , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = min_seq_length
_lowerCamelCase = max_seq_length
_lowerCamelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_lowerCamelCase = padding_value
_lowerCamelCase = sampling_rate
_lowerCamelCase = return_attention_mask
_lowerCamelCase = do_normalize
_lowerCamelCase = feature_size
_lowerCamelCase = chunk_length
_lowerCamelCase = hop_length
def snake_case__ ( self ):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def snake_case__ ( self , lowerCamelCase__=False , lowerCamelCase__=False ):
def _flatten(lowerCamelCase__ ):
return list(itertools.chain(*lowerCamelCase__ ) )
if equal_length:
_lowerCamelCase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_lowerCamelCase = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_lowerCamelCase = [np.asarray(lowerCamelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowerCamelCase_( A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Optional[int] = WhisperFeatureExtractor if is_speech_available() else None
def snake_case__ ( self ):
_lowerCamelCase = WhisperFeatureExtractionTester(self )
def snake_case__ ( self ):
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase = feat_extract_first.save_pretrained(lowerCamelCase__ )[0]
check_json_file_has_correct_format(lowerCamelCase__ )
_lowerCamelCase = self.feature_extraction_class.from_pretrained(lowerCamelCase__ )
_lowerCamelCase = feat_extract_first.to_dict()
_lowerCamelCase = feat_extract_second.to_dict()
_lowerCamelCase = feat_extract_first.mel_filters
_lowerCamelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase = os.path.join(lowerCamelCase__ , '''feat_extract.json''' )
feat_extract_first.to_json_file(lowerCamelCase__ )
_lowerCamelCase = self.feature_extraction_class.from_json_file(lowerCamelCase__ )
_lowerCamelCase = feat_extract_first.to_dict()
_lowerCamelCase = feat_extract_second.to_dict()
_lowerCamelCase = feat_extract_first.mel_filters
_lowerCamelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
# Tests that all call wrap to encode_plus and batch_encode_plus
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_lowerCamelCase = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs]
# Test feature size
_lowerCamelCase = feature_extractor(lowerCamelCase__ , padding='''max_length''' , return_tensors='''np''' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_lowerCamelCase = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features
_lowerCamelCase = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
# Test batched
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_lowerCamelCase = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
_lowerCamelCase = np.asarray(lowerCamelCase__ )
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
# Test truncation required
_lowerCamelCase = [floats_list((1, x) )[0] for x in range(2_0_0 , (feature_extractor.n_samples + 5_0_0) , 2_0_0 )]
_lowerCamelCase = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs]
_lowerCamelCase = [x[: feature_extractor.n_samples] for x in speech_inputs]
_lowerCamelCase = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs_truncated]
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
def snake_case__ ( self ):
import torch
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCamelCase = np.random.rand(1_0_0 , 3_2 ).astype(np.floataa )
_lowerCamelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_lowerCamelCase = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
_lowerCamelCase = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
_lowerCamelCase = ds.sort('''id''' ).select(range(lowerCamelCase__ ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def snake_case__ ( self ):
# fmt: off
_lowerCamelCase = torch.tensor(
[
0.1_1_9_3, -0.0_9_4_6, -0.1_0_9_8, -0.0_1_9_6, 0.0_2_2_5, -0.0_6_9_0, -0.1_7_3_6, 0.0_9_5_1,
0.0_9_7_1, -0.0_8_1_7, -0.0_7_0_2, 0.0_1_6_2, 0.0_2_6_0, 0.0_0_1_7, -0.0_1_9_2, -0.1_6_7_8,
0.0_7_0_9, -0.1_8_6_7, -0.0_6_5_5, -0.0_2_7_4, -0.0_2_3_4, -0.1_8_8_4, -0.0_5_1_6, -0.0_5_5_4,
-0.0_2_7_4, -0.1_4_2_5, -0.1_4_2_3, 0.0_8_3_7, 0.0_3_7_7, -0.0_8_5_4
] )
# fmt: on
_lowerCamelCase = self._load_datasamples(1 )
_lowerCamelCase = WhisperFeatureExtractor()
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''pt''' ).input_features
self.assertEqual(input_features.shape , (1, 8_0, 3_0_0_0) )
self.assertTrue(torch.allclose(input_features[0, 0, :3_0] , lowerCamelCase__ , atol=1e-4 ) )
def snake_case__ ( self ):
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCamelCase = self._load_datasamples(1 )[0]
_lowerCamelCase = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5_5_3_5 # Rescale to [0, 65535] to show issue
_lowerCamelCase = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=lowerCamelCase__ )[0]
self.assertTrue(np.all(np.mean(lowerCamelCase__ ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCamelCase__ ) - 1 ) < 1e-3 ) )
| 623 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowerCAmelCase_( lowercase_ : int , lowercase_ : List[Any] , lowercase_ : List[Any] , lowercase_ : Tuple , lowercase_ : List[str] ) -> Dict:
# Load configuration defined in the metadata file
with open(lowercase_ ) as metadata_file:
_lowerCamelCase = json.load(lowercase_ )
_lowerCamelCase = LukeConfig(use_entity_aware_attention=lowercase_ , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
_lowerCamelCase = torch.load(lowercase_ , map_location='''cpu''' )
# Load the entity vocab file
_lowerCamelCase = load_entity_vocab(lowercase_ )
_lowerCamelCase = RobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
_lowerCamelCase = AddedToken('''<ent>''' , lstrip=lowercase_ , rstrip=lowercase_ )
_lowerCamelCase = AddedToken('''<ent2>''' , lstrip=lowercase_ , rstrip=lowercase_ )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(lowercase_ )
with open(os.path.join(lowercase_ , LukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(lowercase_ , lowercase_ )
_lowerCamelCase = LukeTokenizer.from_pretrained(lowercase_ )
# Initialize the embeddings of the special tokens
_lowerCamelCase = state_dict['''embeddings.word_embeddings.weight''']
_lowerCamelCase = word_emb[tokenizer.convert_tokens_to_ids(['''@'''] )[0]].unsqueeze(0 )
_lowerCamelCase = word_emb[tokenizer.convert_tokens_to_ids(['''#'''] )[0]].unsqueeze(0 )
_lowerCamelCase = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_lowerCamelCase = F"""encoder.layer.{layer_index}.attention.self."""
_lowerCamelCase = state_dict[prefix + matrix_name]
_lowerCamelCase = state_dict[prefix + matrix_name]
_lowerCamelCase = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_lowerCamelCase = state_dict['''entity_embeddings.entity_embeddings.weight''']
_lowerCamelCase = entity_emb[entity_vocab['''[MASK]''']]
_lowerCamelCase = LukeModel(config=lowercase_ ).eval()
_lowerCamelCase , _lowerCamelCase = model.load_state_dict(lowercase_ , strict=lowercase_ )
if not (len(lowercase_ ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(F"""Missing keys {", ".join(lowercase_ )}. Expected only missing embeddings.position_ids""" )
if not (all(key.startswith('''entity_predictions''' ) or key.startswith('''lm_head''' ) for key in unexpected_keys )):
raise ValueError(
'''Unexpected keys'''
F""" {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}""" )
# Check outputs
_lowerCamelCase = LukeTokenizer.from_pretrained(lowercase_ , task='''entity_classification''' )
_lowerCamelCase = (
'''Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'''
''' new world number one avoid a humiliating second- round exit at Wimbledon .'''
)
_lowerCamelCase = (39, 42)
_lowerCamelCase = tokenizer(lowercase_ , entity_spans=[span] , add_prefix_space=lowercase_ , return_tensors='''pt''' )
_lowerCamelCase = model(**lowercase_ )
# Verify word hidden states
if model_size == "large":
_lowerCamelCase = torch.Size((1, 42, 10_24) )
_lowerCamelCase = torch.tensor(
[[0.0_1_3_3, 0.0_8_6_5, 0.0_0_9_5], [0.3_0_9_3, -0.2_5_7_6, -0.7_4_1_8], [-0.1_7_2_0, -0.2_1_1_7, -0.2_8_6_9]] )
else: # base
_lowerCamelCase = torch.Size((1, 42, 7_68) )
_lowerCamelCase = torch.tensor([[0.0_0_3_7, 0.1_3_6_8, -0.0_0_9_1], [0.1_0_9_9, 0.3_3_2_9, -0.1_0_9_5], [0.0_7_6_5, 0.5_3_3_5, 0.1_1_7_9]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase_ , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
_lowerCamelCase = torch.Size((1, 1, 10_24) )
_lowerCamelCase = torch.tensor([[0.0_4_6_6, -0.0_1_0_6, -0.0_1_7_9]] )
else: # base
_lowerCamelCase = torch.Size((1, 1, 7_68) )
_lowerCamelCase = torch.tensor([[0.1_4_5_7, 0.1_0_4_4, 0.0_1_7_4]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowercase_ , atol=1e-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(lowercase_ ) )
model.save_pretrained(lowercase_ )
def lowerCAmelCase_( lowercase_ : Optional[Any] ) -> Any:
_lowerCamelCase = {}
with open(lowercase_ , '''r''' , encoding='''utf-8''' ) as f:
for index, line in enumerate(lowercase_ ):
_lowerCamelCase , _lowerCamelCase = line.rstrip().split('''\t''' )
_lowerCamelCase = index
return entity_vocab
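# Hedged sketch of the expected TSV layout: each line is "<entity title>\t<count>"
# and the title is mapped to its line index, e.g. a first line "[PAD]\t0"
# yields {"[PAD]": 0}.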
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
__SCREAMING_SNAKE_CASE : int = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 623 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
lowercase__ : Tuple = StableDiffusionLDMaDPipeline
lowercase__ : str = TEXT_TO_IMAGE_PARAMS
lowercase__ : Optional[int] = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase__ : Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
def snake_case__ ( self ):
torch.manual_seed(0 )
_lowerCamelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , )
_lowerCamelCase = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=lowerCamelCase__ , set_alpha_to_one=lowerCamelCase__ , )
torch.manual_seed(0 )
_lowerCamelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=6 , out_channels=6 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
_lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
_lowerCamelCase = CLIPTextModel(lowerCamelCase__ )
_lowerCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
_lowerCamelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__=0 ):
if str(lowerCamelCase__ ).startswith('''mps''' ):
_lowerCamelCase = torch.manual_seed(lowerCamelCase__ )
else:
_lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
_lowerCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def snake_case__ ( self ):
_lowerCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_lowerCamelCase = self.get_dummy_components()
_lowerCamelCase = StableDiffusionLDMaDPipeline(**lowerCamelCase__ )
_lowerCamelCase = ldmad_pipe.to(lowerCamelCase__ )
ldmad_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_lowerCamelCase = self.get_dummy_inputs(lowerCamelCase__ )
_lowerCamelCase = ldmad_pipe(**lowerCamelCase__ )
_lowerCamelCase , _lowerCamelCase = output.rgb, output.depth
_lowerCamelCase = rgb[0, -3:, -3:, -1]
_lowerCamelCase = depth[0, -3:, -1]
assert rgb.shape == (1, 6_4, 6_4, 3)
assert depth.shape == (1, 6_4, 6_4)
_lowerCamelCase = np.array(
[0.3_7_3_3_8_1_7_6, 0.7_0_2_4_7, 0.7_4_2_0_3_1_9_3, 0.5_1_6_4_3_6_0_4, 0.5_8_2_5_6_7_9_3, 0.6_0_9_3_2_1_3_6, 0.4_1_8_1_0_9_5, 0.4_8_3_5_5_8_7_7, 0.4_6_5_3_5_2_6_2] )
_lowerCamelCase = np.array([1_0_3.4_6_7_2_7, 8_5.8_1_2_0_0_4, 8_7.8_4_9_2_3_6] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1e-2
def snake_case__ ( self ):
_lowerCamelCase = self.get_dummy_components()
_lowerCamelCase = StableDiffusionLDMaDPipeline(**lowerCamelCase__ )
_lowerCamelCase = ldmad_pipe.to(lowerCamelCase__ )
ldmad_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_lowerCamelCase = self.get_dummy_inputs(lowerCamelCase__ )
_lowerCamelCase = 3 * [inputs['''prompt''']]
# forward
_lowerCamelCase = ldmad_pipe(**lowerCamelCase__ )
_lowerCamelCase , _lowerCamelCase = output.rgb, output.depth
_lowerCamelCase = rgb_slice_a[0, -3:, -3:, -1]
_lowerCamelCase = depth_slice_a[0, -3:, -1]
_lowerCamelCase = self.get_dummy_inputs(lowerCamelCase__ )
_lowerCamelCase = 3 * [inputs.pop('''prompt''' )]
_lowerCamelCase = ldmad_pipe.tokenizer(
lowerCamelCase__ , padding='''max_length''' , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=lowerCamelCase__ , return_tensors='''pt''' , )
_lowerCamelCase = text_inputs['''input_ids'''].to(lowerCamelCase__ )
_lowerCamelCase = ldmad_pipe.text_encoder(lowerCamelCase__ )[0]
_lowerCamelCase = prompt_embeds
# forward
_lowerCamelCase = ldmad_pipe(**lowerCamelCase__ )
_lowerCamelCase , _lowerCamelCase = output.rgb, output.depth
_lowerCamelCase = rgb_slice_a[0, -3:, -3:, -1]
_lowerCamelCase = depth_slice_a[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1e-4
assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1e-4
def snake_case__ ( self ):
_lowerCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_lowerCamelCase = self.get_dummy_components()
_lowerCamelCase = PNDMScheduler(skip_prk_steps=lowerCamelCase__ )
_lowerCamelCase = StableDiffusionLDMaDPipeline(**lowerCamelCase__ )
_lowerCamelCase = ldmad_pipe.to(lowerCamelCase__ )
ldmad_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_lowerCamelCase = self.get_dummy_inputs(lowerCamelCase__ )
_lowerCamelCase = '''french fries'''
_lowerCamelCase = ldmad_pipe(**lowerCamelCase__ , negative_prompt=lowerCamelCase__ )
_lowerCamelCase , _lowerCamelCase = output.rgb, output.depth
_lowerCamelCase = rgb[0, -3:, -3:, -1]
_lowerCamelCase = depth[0, -3:, -1]
assert rgb.shape == (1, 6_4, 6_4, 3)
assert depth.shape == (1, 6_4, 6_4)
_lowerCamelCase = np.array(
[0.3_7_0_4_4, 0.7_1_8_1_1_5_0_3, 0.7_2_2_3_2_5_1, 0.4_8_6_0_3_6_7_5, 0.5_6_3_8_3_9_1, 0.6_3_6_4_9_4_8, 0.4_2_8_3_3_7_0_4, 0.4_9_0_1_3_1_5, 0.4_7_9_2_6_2_1_7] )
_lowerCamelCase = np.array([1_0_7.8_4_7_3_8, 8_4.6_2_8_0_2, 8_9.9_6_2_1_3_5] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1e-2
@slow
@require_torch_gpu
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__="cpu" , lowerCamelCase__=torch.floataa , lowerCamelCase__=0 ):
_lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
_lowerCamelCase = np.random.RandomState(lowerCamelCase__ ).standard_normal((1, 4, 6_4, 6_4) )
_lowerCamelCase = torch.from_numpy(lowerCamelCase__ ).to(device=lowerCamelCase__ , dtype=lowerCamelCase__ )
_lowerCamelCase = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def snake_case__ ( self ):
_lowerCamelCase = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d''' )
_lowerCamelCase = ldmad_pipe.to(lowerCamelCase__ )
ldmad_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_lowerCamelCase = self.get_inputs(lowerCamelCase__ )
_lowerCamelCase = ldmad_pipe(**lowerCamelCase__ )
_lowerCamelCase , _lowerCamelCase = output.rgb, output.depth
_lowerCamelCase = rgb[0, -3:, -3:, -1].flatten()
_lowerCamelCase = rgb[0, -3:, -1].flatten()
assert rgb.shape == (1, 5_1_2, 5_1_2, 3)
assert depth.shape == (1, 5_1_2, 5_1_2)
_lowerCamelCase = np.array(
[0.5_3_8_0_5_4_6_5, 0.5_6_7_0_7_3_0_5, 0.5_4_8_6_5_1_5, 0.5_7_0_1_2_2_3_6, 0.5_8_1_4_5_1_1, 0.5_6_2_5_3_4_8_7, 0.5_4_8_4_3_0_1_4, 0.5_5_0_9_2_2_6_3, 0.6_4_5_9_7_0_6] )
_lowerCamelCase = np.array(
[0.9_2_6_3_7_8_1, 0.6_6_7_8_6_7_2, 0.5_4_8_6_5_1_5, 0.9_2_2_0_2_1_4_5, 0.6_7_8_3_1_1_3_5, 0.5_6_2_5_3_4_8_7, 0.9_2_4_1_6_9_4, 0.7_5_5_1_4_7_8, 0.6_4_5_9_7_0_6] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3e-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3e-3
@nightly
@require_torch_gpu
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__="cpu" , lowerCamelCase__=torch.floataa , lowerCamelCase__=0 ):
_lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
_lowerCamelCase = np.random.RandomState(lowerCamelCase__ ).standard_normal((1, 4, 6_4, 6_4) )
_lowerCamelCase = torch.from_numpy(lowerCamelCase__ ).to(device=lowerCamelCase__ , dtype=lowerCamelCase__ )
_lowerCamelCase = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 5_0,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def snake_case__ ( self ):
_lowerCamelCase = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d''' ).to(lowerCamelCase__ )
ldmad_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_lowerCamelCase = self.get_inputs(lowerCamelCase__ )
_lowerCamelCase = ldmad_pipe(**lowerCamelCase__ )
_lowerCamelCase , _lowerCamelCase = output.rgb, output.depth
_lowerCamelCase = 0.4_9_5_5_8_6
_lowerCamelCase = 0.3_3_7_9_5_5_1_5
_lowerCamelCase = 1_1_2.4_8_5_1_8
_lowerCamelCase = 9_8.4_8_9_7_4_6
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
def snake_case__ ( self ):
_lowerCamelCase = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d-4c''' ).to(lowerCamelCase__ )
ldmad_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_lowerCamelCase = self.get_inputs(lowerCamelCase__ )
_lowerCamelCase = ldmad_pipe(**lowerCamelCase__ )
_lowerCamelCase , _lowerCamelCase = output.rgb, output.depth
_lowerCamelCase = 0.4_1_9_4_1_2_7
_lowerCamelCase = 0.3_5_3_7_5_5_8_6
_lowerCamelCase = 0.5_6_3_8_5_0_2
_lowerCamelCase = 0.3_4_6_8_6_1_0_3
assert rgb.shape == (1, 5_1_2, 5_1_2, 3)
assert depth.shape == (1, 5_1_2, 5_1_2, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
| 623 |
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=1_3 , lowerCamelCase__=3_0 , lowerCamelCase__=2 , lowerCamelCase__=3 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=3_2 , lowerCamelCase__=2 , lowerCamelCase__=4 , lowerCamelCase__=3_7 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=1_0 , lowerCamelCase__=0.0_2 , lowerCamelCase__=3 , lowerCamelCase__=0.6 , lowerCamelCase__=None , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = image_size
_lowerCamelCase = patch_size
_lowerCamelCase = num_channels
_lowerCamelCase = is_training
_lowerCamelCase = use_labels
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = type_sequence_label_size
_lowerCamelCase = initializer_range
_lowerCamelCase = mask_ratio
_lowerCamelCase = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
_lowerCamelCase = (image_size // patch_size) ** 2
_lowerCamelCase = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
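# Worked example with the defaults above: image_size=30, patch_size=2 gives
# (30 // 2) ** 2 = 225 patches; with mask_ratio=0.6 the expected sequence
# length is ceil(0.4 * (225 + 1)) = 91 (the +1 accounts for the [CLS] token).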
def snake_case__ ( self ):
_lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase = None
if self.use_labels:
_lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase = self.get_config()
return config, pixel_values, labels
def snake_case__ ( self ):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = TFViTMAEModel(config=lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , training=lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = TFViTMAEForPreTraining(lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , training=lowerCamelCase__ )
# expected sequence length = num_patches
_lowerCamelCase = (self.image_size // self.patch_size) ** 2
_lowerCamelCase = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
_lowerCamelCase = 1
_lowerCamelCase = TFViTMAEForPreTraining(lowerCamelCase__ )
_lowerCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase = model(lowerCamelCase__ , training=lowerCamelCase__ )
_lowerCamelCase = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def snake_case__ ( self ):
_lowerCamelCase = self.prepare_config_and_inputs()
((_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase)) = config_and_inputs
_lowerCamelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class lowerCamelCase_( A__, A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Optional[Any] = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
lowercase__ : Dict = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {}
lowercase__ : Optional[Any] = False
lowercase__ : Union[str, Any] = False
lowercase__ : str = False
lowercase__ : List[str] = False
def snake_case__ ( self ):
_lowerCamelCase = TFViTMAEModelTester(self )
_lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=3_7 )
def snake_case__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
_lowerCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ , tf.keras.layers.Layer ) )
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase = [*signature.parameters.keys()]
_lowerCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCamelCase__ )
def snake_case__ ( self ):
# make the mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
_lowerCamelCase = copy.deepcopy(self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
_lowerCamelCase = model(**lowerCamelCase__ , noise=lowerCamelCase__ )
_lowerCamelCase = outputs_dict[0].numpy()
_lowerCamelCase = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 )
def snake_case__ ( self ):
# make the mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(lowerCamelCase__ ):
_lowerCamelCase = {}
for k, v in inputs_dict.items():
if tf.is_tensor(lowerCamelCase__ ):
_lowerCamelCase = v.numpy()
else:
_lowerCamelCase = np.array(lowerCamelCase__ )
return inputs_np_dict
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = prepare_numpy_arrays(lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
_lowerCamelCase = model(**lowerCamelCase__ , noise=lowerCamelCase__ )
self.assert_outputs_same(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
# make masks reproducible
np.random.seed(2 )
_lowerCamelCase = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_lowerCamelCase = tf.constant(lowerCamelCase__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
_lowerCamelCase = tf_noise
super().check_pt_tf_models(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
# make mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(lowerCamelCase__ )
if module_member_name.endswith('''MainLayer''' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )]
for module_member in (getattr(lowerCamelCase__ , lowerCamelCase__ ),)
if isinstance(lowerCamelCase__ , lowerCamelCase__ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(lowerCamelCase__ , '''_keras_serializable''' , lowerCamelCase__ )
}
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_lowerCamelCase = tf.convert_to_tensor(lowerCamelCase__ )
inputs_dict.update({'''noise''': noise} )
for main_layer_class in tf_main_layer_classes:
_lowerCamelCase = main_layer_class(lowerCamelCase__ )
_lowerCamelCase = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
_lowerCamelCase = tf.keras.Model(lowerCamelCase__ , outputs=main_layer(lowerCamelCase__ ) )
_lowerCamelCase = model(lowerCamelCase__ )
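# Round-trip the wrapped Keras model through an HDF5 file and compare outputs before and after.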
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase = os.path.join(lowerCamelCase__ , '''keras_model.h5''' )
model.save(lowerCamelCase__ )
_lowerCamelCase = tf.keras.models.load_model(
lowerCamelCase__ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(lowerCamelCase__ , tf.keras.Model )
_lowerCamelCase = model(lowerCamelCase__ )
self.assert_outputs_same(lowerCamelCase__ , lowerCamelCase__ )
@slow
def snake_case__ ( self ):
# make mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
if model_class.__name__ == "TFViTMAEModel":
_lowerCamelCase = outputs.last_hidden_state.numpy()
_lowerCamelCase = 0
else:
_lowerCamelCase = outputs.logits.numpy()
_lowerCamelCase = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase__ , saved_model=lowerCamelCase__ )
_lowerCamelCase = model_class.from_pretrained(lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
if model_class.__name__ == "TFViTMAEModel":
_lowerCamelCase = after_outputs['''last_hidden_state'''].numpy()
_lowerCamelCase = 0
else:
_lowerCamelCase = after_outputs['''logits'''].numpy()
_lowerCamelCase = 0
_lowerCamelCase = np.amax(np.abs(out_1 - out_2 ) )
self.assertLessEqual(lowerCamelCase__ , 1e-5 )
def snake_case__ ( self ):
# make mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
_lowerCamelCase = model.get_config()
# make sure the returned config is JSON-serializable, which is required by Keras
json.dumps(lowerCamelCase__ )
_lowerCamelCase = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
_lowerCamelCase = model_class.from_config(model.config )
_lowerCamelCase = new_model(lowerCamelCase__ ) # Build model
new_model.set_weights(model.get_weights() )
_lowerCamelCase = new_model(lowerCamelCase__ , noise=lowerCamelCase__ )
self.assert_outputs_same(lowerCamelCase__ , lowerCamelCase__ )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def snake_case__ ( self ):
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def snake_case__ ( self ):
pass
@slow
def snake_case__ ( self ):
_lowerCamelCase = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(lowerCamelCase__ )
def lowerCAmelCase_( ) -> List[Any]:
_lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case__ ( self ):
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def snake_case__ ( self ):
# make the random mask reproducible across the PT and TF models
np.random.seed(2 )
_lowerCamelCase = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' )
_lowerCamelCase = self.default_image_processor
_lowerCamelCase = prepare_img()
_lowerCamelCase = image_processor(images=lowerCamelCase__ , return_tensors='''tf''' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
_lowerCamelCase = ViTMAEConfig()
_lowerCamelCase = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(1, num_patches) )
# forward pass
_lowerCamelCase = model(**lowerCamelCase__ , noise=lowerCamelCase__ )
# verify the logits
_lowerCamelCase = tf.convert_to_tensor([1, 1_9_6, 7_6_8] )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
_lowerCamelCase = tf.convert_to_tensor(
[[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
| 623 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase_( A__, A__, A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : int = StableDiffusionInpaintPipeline
lowercase__ : Dict = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
lowercase__ : Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
lowercase__ : List[str] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowercase__ : str = frozenset([] )
def snake_case__ ( self ):
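# Build deliberately tiny UNet/VAE/CLIP components so the pipeline test stays fast and deterministic.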
torch.manual_seed(0 )
_lowerCamelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=lowerCamelCase__ , )
_lowerCamelCase = PNDMScheduler(skip_prk_steps=lowerCamelCase__ )
torch.manual_seed(0 )
_lowerCamelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
_lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=5_1_2 , )
_lowerCamelCase = CLIPTextModel(lowerCamelCase__ )
_lowerCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
_lowerCamelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__=0 ):
# TODO: use tensor inputs instead of PIL; this is here just to leave the old expected_slices untouched
_lowerCamelCase = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
_lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCamelCase = Image.fromarray(np.uinta(lowerCamelCase__ ) ).convert('''RGB''' ).resize((6_4, 6_4) )
_lowerCamelCase = Image.fromarray(np.uinta(image + 4 ) ).convert('''RGB''' ).resize((6_4, 6_4) )
if str(lowerCamelCase__ ).startswith('''mps''' ):
_lowerCamelCase = torch.manual_seed(lowerCamelCase__ )
else:
_lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
_lowerCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': init_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def snake_case__ ( self ):
_lowerCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_lowerCamelCase = self.get_dummy_components()
_lowerCamelCase = StableDiffusionInpaintPipeline(**lowerCamelCase__ )
_lowerCamelCase = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_lowerCamelCase = self.get_dummy_inputs(lowerCamelCase__ )
_lowerCamelCase = sd_pipe(**lowerCamelCase__ ).images
_lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_lowerCamelCase = np.array([0.4_7_2_7, 0.5_7_3_5, 0.3_9_4_1, 0.5_4_4_6, 0.5_9_2_6, 0.4_3_9_4, 0.5_0_6_2, 0.4_6_5_4, 0.4_4_7_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case__ ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self ):
_lowerCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
_lowerCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
_lowerCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench.npy''' )
_lowerCamelCase = '''stabilityai/stable-diffusion-2-inpainting'''
_lowerCamelCase = StableDiffusionInpaintPipeline.from_pretrained(lowerCamelCase__ , safety_checker=lowerCamelCase__ )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
pipe.enable_attention_slicing()
_lowerCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
_lowerCamelCase = torch.manual_seed(0 )
_lowerCamelCase = pipe(
prompt=lowerCamelCase__ , image=lowerCamelCase__ , mask_image=lowerCamelCase__ , generator=lowerCamelCase__ , output_type='''np''' , )
_lowerCamelCase = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 9e-3
def snake_case__ ( self ):
_lowerCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
_lowerCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
_lowerCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench_fp16.npy''' )
_lowerCamelCase = '''stabilityai/stable-diffusion-2-inpainting'''
_lowerCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
lowerCamelCase__ , torch_dtype=torch.floataa , safety_checker=lowerCamelCase__ , )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
pipe.enable_attention_slicing()
_lowerCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
_lowerCamelCase = torch.manual_seed(0 )
_lowerCamelCase = pipe(
prompt=lowerCamelCase__ , image=lowerCamelCase__ , mask_image=lowerCamelCase__ , generator=lowerCamelCase__ , output_type='''np''' , )
_lowerCamelCase = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def snake_case__ ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
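# Measure peak VRAM for a run with attention slicing and sequential CPU offload enabled.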
_lowerCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
_lowerCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
_lowerCamelCase = '''stabilityai/stable-diffusion-2-inpainting'''
_lowerCamelCase = PNDMScheduler.from_pretrained(lowerCamelCase__ , subfolder='''scheduler''' )
_lowerCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
lowerCamelCase__ , safety_checker=lowerCamelCase__ , scheduler=lowerCamelCase__ , torch_dtype=torch.floataa , )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_lowerCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
_lowerCamelCase = torch.manual_seed(0 )
_lowerCamelCase = pipe(
prompt=lowerCamelCase__ , image=lowerCamelCase__ , mask_image=lowerCamelCase__ , generator=lowerCamelCase__ , num_inference_steps=2 , output_type='''np''' , )
_lowerCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.6_5 * 1_0**9
| 623 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def lowerCAmelCase_( lowercase_ : str = "laptop" ) -> DataFrame:
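# Scrape Amazon India search results for the given product into a pandas DataFrame.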
_lowerCamelCase = F"""https://www.amazon.in/laptop/s?k={product}"""
_lowerCamelCase = {
'''User-Agent''': '''Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
(KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36''',
'''Accept-Language''': '''en-US, en;q=0.5''',
}
_lowerCamelCase = BeautifulSoup(requests.get(lowercase_ , headers=lowercase_ ).text )
# Initialize a Pandas dataframe with the column titles
_lowerCamelCase = DataFrame(
columns=[
'''Product Title''',
'''Product Link''',
'''Current Price of the product''',
'''Product Rating''',
'''MRP of the product''',
'''Discount''',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'''div''' , attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''} , ) , soup.find_all('''div''' , attrs={'''class''': '''a-row a-size-base a-color-base'''} ) , ):
try:
_lowerCamelCase = item.ha.text
_lowerCamelCase = '''https://www.amazon.in/''' + item.ha.a['''href''']
_lowerCamelCase = item.find('''span''' , attrs={'''class''': '''a-offscreen'''} ).text
try:
_lowerCamelCase = item.find('''span''' , attrs={'''class''': '''a-icon-alt'''} ).text
except AttributeError:
_lowerCamelCase = '''Not available'''
try:
_lowerCamelCase = (
'''₹'''
+ item.find(
'''span''' , attrs={'''class''': '''a-price a-text-price'''} ).text.split('''₹''' )[1]
)
except AttributeError:
_lowerCamelCase = ''''''
try:
_lowerCamelCase = float(
(
(
float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
- float(product_price.strip('''₹''' ).replace(''',''' , '''''' ) )
)
/ float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
)
* 1_00 )
except ValueError:
_lowerCamelCase = float('''nan''' )
except AttributeError:
pass
_lowerCamelCase = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
_lowerCamelCase = ''' '''
_lowerCamelCase = ''' '''
data_frame.index += 1
return data_frame
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[int] = '''headphones'''
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 623 | 1 |
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = '''0.18.2'''
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 623 |
"""simple docstring"""
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class lowerCamelCase_( A__ ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=1_0_2_4 , lowerCamelCase__=1_0_2_4 , lowerCamelCase__=3.6 ):
_lowerCamelCase = tokenizer
_lowerCamelCase = tokenizer.bos_token_id
_lowerCamelCase = dataset
_lowerCamelCase = seq_length
_lowerCamelCase = seq_length * chars_per_token * num_of_sequences
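# Buffer raw examples until enough characters accumulate, tokenize them, and emit fixed-length chunks.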
def __iter__( self ):
_lowerCamelCase = iter(self.dataset )
_lowerCamelCase = True
while more_examples:
_lowerCamelCase , _lowerCamelCase = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(lowerCamelCase__ )['''content'''] )
buffer_len += len(buffer[-1] )
except StopIteration:
_lowerCamelCase = False
break
_lowerCamelCase = tokenizer(lowerCamelCase__ , truncation=lowerCamelCase__ )['''input_ids''']
_lowerCamelCase = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0 , len(lowerCamelCase__ ) , self.seq_length ):
_lowerCamelCase = all_token_ids[i : i + self.seq_length]
if len(lowerCamelCase__ ) == self.seq_length:
yield torch.tensor(lowerCamelCase__ )
def lowerCAmelCase_( lowercase_ : Any ) -> Optional[Any]:
_lowerCamelCase = {'''streaming''': True}
_lowerCamelCase = load_dataset(args.dataset_name , split='''train''' , **lowercase_ )
_lowerCamelCase = ConstantLengthDataset(lowercase_ , lowercase_ , seq_length=args.seq_length )
_lowerCamelCase = DataLoader(lowercase_ , batch_size=args.batch_size )
return eval_dataloader
def lowerCAmelCase_( lowercase_ : Tuple ) -> str:
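# Average the language-modeling loss over the eval set (gathered across processes) and derive perplexity.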
model.eval()
_lowerCamelCase = []
for step, batch in enumerate(lowercase_ ):
with torch.no_grad():
_lowerCamelCase = model(lowercase_ , labels=lowercase_ )
_lowerCamelCase = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(lowercase_ ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
_lowerCamelCase = torch.mean(torch.cat(lowercase_ ) )
try:
_lowerCamelCase = torch.exp(lowercase_ )
except OverflowError:
_lowerCamelCase = float('''inf''' )
return loss.item(), perplexity.item()
# Setup Accelerator
__SCREAMING_SNAKE_CASE : Dict = Accelerator()
# Parse configuration
__SCREAMING_SNAKE_CASE : Tuple = HfArgumentParser(EvaluationArguments)
__SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
set_seed(args.seed)
# Logging
__SCREAMING_SNAKE_CASE : Optional[Any] = logging.getLogger(__name__)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
# Load model and tokenizer
__SCREAMING_SNAKE_CASE : str = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
__SCREAMING_SNAKE_CASE : Union[str, Any] = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
__SCREAMING_SNAKE_CASE : str = create_dataloader(args)
# Prepare everything with our `accelerator`.
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('''Evaluating and saving model after training''')
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = evaluate(args)
logger.info(F"""loss/eval: {eval_loss}, perplexity: {perplexity}""")
| 623 | 1 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Dict = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
__SCREAMING_SNAKE_CASE : Optional[Any] = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def lowerCAmelCase_( lowercase_ : str , lowercase_ : Any , lowercase_ : int , lowercase_ : Union[str, Any] , lowercase_ : Tuple ) -> List[Any]:
for attribute in key.split('''.''' ):
_lowerCamelCase = getattr(lowercase_ , lowercase_ )
if weight_type is not None:
_lowerCamelCase = getattr(lowercase_ , lowercase_ ).shape
else:
_lowerCamelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
_lowerCamelCase = value
elif weight_type == "weight_g":
_lowerCamelCase = value
elif weight_type == "weight_v":
_lowerCamelCase = value
elif weight_type == "bias":
_lowerCamelCase = value
elif weight_type == "running_mean":
_lowerCamelCase = value
elif weight_type == "running_var":
_lowerCamelCase = value
elif weight_type == "num_batches_tracked":
_lowerCamelCase = value
elif weight_type == "inv_freq":
_lowerCamelCase = value
else:
_lowerCamelCase = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def lowerCAmelCase_( lowercase_ : Optional[Any] , lowercase_ : str , lowercase_ : Optional[int] ) -> Tuple:
_lowerCamelCase = []
_lowerCamelCase = fairseq_model.state_dict()
_lowerCamelCase = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
_lowerCamelCase = False
if "conv_layers" in name:
load_conv_layer(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , hf_model.config.feat_extract_norm == '''group''' , )
_lowerCamelCase = True
else:
for key, mapped_key in MAPPING.items():
_lowerCamelCase = '''wav2vec2_conformer.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
_lowerCamelCase = True
if "*" in mapped_key:
_lowerCamelCase = name.split(lowercase_ )[0].split('''.''' )[-2]
_lowerCamelCase = mapped_key.replace('''*''' , lowercase_ )
if "pos_bias_u" in name:
_lowerCamelCase = None
elif "pos_bias_v" in name:
_lowerCamelCase = None
elif "weight_g" in name:
_lowerCamelCase = '''weight_g'''
elif "weight_v" in name:
_lowerCamelCase = '''weight_v'''
elif "bias" in name:
_lowerCamelCase = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_lowerCamelCase = '''weight'''
elif "running_mean" in name:
_lowerCamelCase = '''running_mean'''
elif "inv_freq" in name:
_lowerCamelCase = '''inv_freq'''
elif "running_var" in name:
_lowerCamelCase = '''running_var'''
elif "num_batches_tracked" in name:
_lowerCamelCase = '''num_batches_tracked'''
else:
_lowerCamelCase = None
set_recursively(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
continue
if not is_used:
unused_weights.append(lowercase_ )
logger.warning(F"""Unused weights: {unused_weights}""" )
def lowerCAmelCase_( lowercase_ : Tuple , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : List[Any] , lowercase_ : Dict ) -> Union[str, Any]:
_lowerCamelCase = full_name.split('''conv_layers.''' )[-1]
_lowerCamelCase = name.split('''.''' )
_lowerCamelCase = int(items[0] )
_lowerCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
_lowerCamelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
_lowerCamelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
_lowerCamelCase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
_lowerCamelCase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowercase_ )
@torch.no_grad()
def lowerCAmelCase_( lowercase_ : int , lowercase_ : Tuple , lowercase_ : Any=None , lowercase_ : int=None , lowercase_ : Union[str, Any]=True ) -> int:
if config_path is not None:
_lowerCamelCase = WavaVecaConformerConfig.from_pretrained(lowercase_ , hidden_act='''swish''' )
else:
_lowerCamelCase = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
_lowerCamelCase = '''rotary'''
if is_finetuned:
if dict_path:
_lowerCamelCase = Dictionary.load(lowercase_ )
# Important: change the bos & pad token ids, since the CTC blank symbol is <pad> and
# not <s> as in fairseq
_lowerCamelCase = target_dict.pad_index
_lowerCamelCase = target_dict.bos_index
_lowerCamelCase = target_dict.eos_index
_lowerCamelCase = len(target_dict.symbols )
_lowerCamelCase = os.path.join(lowercase_ , '''vocab.json''' )
if not os.path.isdir(lowercase_ ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(lowercase_ ) )
return
os.makedirs(lowercase_ , exist_ok=lowercase_ )
_lowerCamelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
_lowerCamelCase = 0
_lowerCamelCase = 1
with open(lowercase_ , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(lowercase_ , lowercase_ )
_lowerCamelCase = WavaVecaCTCTokenizer(
lowercase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=lowercase_ , )
_lowerCamelCase = True if config.feat_extract_norm == '''layer''' else False
_lowerCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=lowercase_ , return_attention_mask=lowercase_ , )
_lowerCamelCase = WavaVecaProcessor(feature_extractor=lowercase_ , tokenizer=lowercase_ )
processor.save_pretrained(lowercase_ )
_lowerCamelCase = WavaVecaConformerForCTC(lowercase_ )
else:
_lowerCamelCase = WavaVecaConformerForPreTraining(lowercase_ )
if is_finetuned:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
_lowerCamelCase = argparse.Namespace(task='''audio_pretraining''' )
_lowerCamelCase = fairseq.tasks.setup_task(lowercase_ )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowercase_ )
_lowerCamelCase = model[0].eval()
recursively_load_weights(lowercase_ , lowercase_ , not is_finetuned )
hf_wavavec.save_pretrained(lowercase_ )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
__SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 623 |
"""simple docstring"""
import numpy as np
def lowerCAmelCase_( lowercase_ : np.ndarray , lowercase_ : np.ndarray , lowercase_ : float = 1e-12 , lowercase_ : int = 1_00 , ) -> tuple[float, np.ndarray]:
assert np.shape(lowercase_ )[0] == np.shape(lowercase_ )[1]
# Ensure proper dimensionality.
assert np.shape(lowercase_ )[0] == np.shape(lowercase_ )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(lowercase_ ) == np.iscomplexobj(lowercase_ )
_lowerCamelCase = np.iscomplexobj(lowercase_ )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(lowercase_ , input_matrix.conj().T )
# Set convergence to False. We declare convergence once we exceed max_iterations
# or once the change from one iteration to the next is small.
_lowerCamelCase = False
_lowerCamelCase = 0
_lowerCamelCase = 0
_lowerCamelCase = 1e12
while not convergence:
# Multiply the matrix by the vector.
_lowerCamelCase = np.dot(lowercase_ , lowercase_ )
# Normalize the resulting output vector.
_lowerCamelCase = w / np.linalg.norm(lowercase_ )
# Find the Rayleigh quotient
# (faster than usual because we know the vector is already normalized)
_lowerCamelCase = vector.conj().T if is_complex else vector.T
_lowerCamelCase = np.dot(lowercase_ , np.dot(lowercase_ , lowercase_ ) )
# Check convergence.
_lowerCamelCase = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
_lowerCamelCase = True
_lowerCamelCase = lambda_
if is_complex:
_lowerCamelCase = np.real(lambda_ )
return lambda_, vector
def lowerCAmelCase_( ) -> None:
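# Check power iteration against numpy's eigh on a real symmetric and a complex Hermitian matrix.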
_lowerCamelCase = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
_lowerCamelCase = np.array([41, 4, 20] )
_lowerCamelCase = real_input_matrix.astype(np.complexaaa )
_lowerCamelCase = np.triu(1j * complex_input_matrix , 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
_lowerCamelCase = np.array([41, 4, 20] ).astype(np.complexaaa )
for problem_type in ["real", "complex"]:
if problem_type == "real":
_lowerCamelCase = real_input_matrix
_lowerCamelCase = real_vector
elif problem_type == "complex":
_lowerCamelCase = complex_input_matrix
_lowerCamelCase = complex_vector
# Our implementation.
_lowerCamelCase , _lowerCamelCase = power_iteration(lowercase_ , lowercase_ )
# Numpy implementation.
# Get eigenvalues and eigenvectors using numpy's built-in
# eigh (eigh is used for symmetric or Hermitian matrices).
_lowerCamelCase , _lowerCamelCase = np.linalg.eigh(lowercase_ )
# Last eigenvalue is the maximum one.
_lowerCamelCase = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
_lowerCamelCase = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1e-6
# Take element-wise absolute values of each eigenvector,
# since eigenvectors are only unique up to a sign.
assert np.linalg.norm(np.abs(lowercase_ ) - np.abs(lowercase_ ) ) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 623 | 1 |
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
__SCREAMING_SNAKE_CASE : List[Any] = logging.getLogger(__name__)
def lowerCAmelCase_( lowercase_ : Any , lowercase_ : Dict ) -> List[str]:
# Remove any stale config/weights in the output dir, then save the model there.
if os.path.exists(lowercase_ ):
if os.path.exists(os.path.join(lowercase_ , '''config.json''' ) ) and os.path.isfile(
os.path.join(lowercase_ , '''config.json''' ) ):
os.remove(os.path.join(lowercase_ , '''config.json''' ) )
if os.path.exists(os.path.join(lowercase_ , '''pytorch_model.bin''' ) ) and os.path.isfile(
os.path.join(lowercase_ , '''pytorch_model.bin''' ) ):
os.remove(os.path.join(lowercase_ , '''pytorch_model.bin''' ) )
else:
os.makedirs(lowercase_ )
model.save_pretrained(lowercase_ )
def lowerCAmelCase_( lowercase_ : Any , lowercase_ : int=False ) -> Optional[int]:
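# Entropy of the distribution p along the last dim; if unlogit, square p first.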
_lowerCamelCase = 2
if unlogit:
_lowerCamelCase = torch.pow(lowercase_ , lowercase_ )
_lowerCamelCase = p * torch.log(lowercase_ )
_lowerCamelCase = 0
return -plogp.sum(dim=-1 )
def lowerCAmelCase_( lowercase_ : Union[str, Any] ) -> List[Any]:
logger.info('''lv, h >\t''' + '''\t'''.join(F"""{x + 1}""" for x in range(len(lowercase_ ) ) ) )
for row in range(len(lowercase_ ) ):
if tensor.dtype != torch.long:
logger.info(F"""layer {row + 1}:\t""" + '''\t'''.join(F"""{x:.5f}""" for x in tensor[row].cpu().data ) )
else:
logger.info(F"""layer {row + 1}:\t""" + '''\t'''.join(F"""{x:d}""" for x in tensor[row].cpu().data ) )
def lowerCAmelCase_( lowercase_ : List[Any] , lowercase_ : List[str] , lowercase_ : Union[str, Any] , lowercase_ : Tuple=True , lowercase_ : Optional[Any]=True , lowercase_ : List[Any]=None , lowercase_ : Dict=False ) -> Union[str, Any]:
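# Accumulate attention entropy and gradient-based head importance scores over the evaluation data.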
_lowerCamelCase , _lowerCamelCase = model.config.num_hidden_layers, model.config.num_attention_heads
_lowerCamelCase = torch.zeros(lowercase_ , lowercase_ ).to(args.device )
_lowerCamelCase = torch.zeros(lowercase_ , lowercase_ ).to(args.device )
if head_mask is None:
_lowerCamelCase = torch.ones(lowercase_ , lowercase_ ).to(args.device )
head_mask.requires_grad_(requires_grad=lowercase_ )
# If the attention heads were actually pruned, set the head mask to None to avoid a shape mismatch
if actually_pruned:
_lowerCamelCase = None
_lowerCamelCase = 0.0
_lowerCamelCase = 0.0
for step, inputs in enumerate(tqdm(lowercase_ , desc='''Iteration''' , disable=args.local_rank not in [-1, 0] ) ):
_lowerCamelCase = tuple(t.to(args.device ) for t in inputs )
((_lowerCamelCase) , ) = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
_lowerCamelCase = model(lowercase_ , labels=lowercase_ , head_mask=lowercase_ )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits come first, attentions last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(lowercase_ ):
_lowerCamelCase = entropy(attn.detach() , lowercase_ )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(lowercase_ ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
_lowerCamelCase = 2
_lowerCamelCase = torch.pow(torch.pow(lowercase_ , lowercase_ ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
if not args.dont_normalize_global_importance:
_lowerCamelCase = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('''Attention entropies''' )
print_ad_tensor(lowercase_ )
if compute_importance:
logger.info('''Head importance scores''' )
print_ad_tensor(lowercase_ )
logger.info('''Head ranked by importance scores''' )
_lowerCamelCase = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
_lowerCamelCase = torch.arange(
head_importance.numel() , device=args.device )
_lowerCamelCase = head_ranks.view_as(lowercase_ )
print_ad_tensor(lowercase_ )
return attn_entropy, head_importance, total_loss
def lowerCAmelCase_( lowercase_ : int , lowercase_ : int , lowercase_ : Dict ) -> Any:
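# Greedily zero out the least important heads while the score stays above the masking threshold.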
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = compute_heads_importance(lowercase_ , lowercase_ , lowercase_ , compute_entropy=lowercase_ )
_lowerCamelCase = 1 / loss # instead of downsteam score use the LM loss
logger.info('''Pruning: original score: %f, threshold: %f''' , lowercase_ , original_score * args.masking_threshold )
_lowerCamelCase = torch.ones_like(lowercase_ )
_lowerCamelCase = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
_lowerCamelCase = original_score
while current_score >= original_score * args.masking_threshold:
_lowerCamelCase = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
_lowerCamelCase = float('''Inf''' )
_lowerCamelCase = head_importance.view(-1 ).sort()[1]
if len(lowercase_ ) <= num_to_mask:
print('''BREAK BY num_to_mask''' )
break
# mask heads
_lowerCamelCase = current_heads_to_mask[:num_to_mask]
logger.info('''Heads to mask: %s''' , str(current_heads_to_mask.tolist() ) )
_lowerCamelCase = new_head_mask.view(-1 )
_lowerCamelCase = 0.0
_lowerCamelCase = new_head_mask.view_as(lowercase_ )
_lowerCamelCase = new_head_mask.clone().detach()
print_ad_tensor(lowercase_ )
# Compute metric and head importance again
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = compute_heads_importance(
lowercase_ , lowercase_ , lowercase_ , compute_entropy=lowercase_ , head_mask=lowercase_ )
_lowerCamelCase = 1 / loss
logger.info(
'''Masking: current score: %f, remaining heads %d (%.1f percents)''' , lowercase_ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_00 , )
logger.info('''Final head mask''' )
print_ad_tensor(lowercase_ )
np.save(os.path.join(args.output_dir , '''head_mask.npy''' ) , head_mask.detach().cpu().numpy() )
return head_mask
def lowerCAmelCase_( lowercase_ : int , lowercase_ : List[str] , lowercase_ : List[Any] , lowercase_ : Optional[int] ) -> Union[str, Any]:
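# Physically remove the masked heads, then compare score, parameter count, and runtime before/after.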
_lowerCamelCase = datetime.now()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = compute_heads_importance(
lowercase_ , lowercase_ , lowercase_ , compute_entropy=lowercase_ , compute_importance=lowercase_ , head_mask=lowercase_ )
_lowerCamelCase = 1 / loss
_lowerCamelCase = datetime.now() - before_time
_lowerCamelCase = sum(p.numel() for p in model.parameters() )
_lowerCamelCase = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(lowercase_ ) )
}
for k, v in heads_to_prune.items():
if isinstance(lowercase_ , lowercase_ ):
_lowerCamelCase = [
v,
]
assert sum(len(lowercase_ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(lowercase_ )
_lowerCamelCase = sum(p.numel() for p in model.parameters() )
_lowerCamelCase = datetime.now()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = compute_heads_importance(
lowercase_ , lowercase_ , lowercase_ , compute_entropy=lowercase_ , compute_importance=lowercase_ , head_mask=lowercase_ , actually_pruned=lowercase_ , )
_lowerCamelCase = 1 / loss
_lowerCamelCase = datetime.now() - before_time
logger.info(
'''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''' , lowercase_ , lowercase_ , pruned_num_params / original_num_params * 1_00 , )
logger.info('''Pruning: score with masking: %f score with pruning: %f''' , lowercase_ , lowercase_ )
logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''' , original_time / new_time * 1_00 )
save_model(lowercase_ , args.output_dir )
def lowerCAmelCase_( ) -> Optional[Any]:
_lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--data_dir''' , default=lowercase_ , type=lowercase_ , required=lowercase_ , help='''The input data dir. Should contain the .tsv files (or other data files) for the task.''' , )
parser.add_argument(
'''--model_name_or_path''' , default=lowercase_ , type=lowercase_ , required=lowercase_ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--output_dir''' , default=lowercase_ , type=lowercase_ , required=lowercase_ , help='''The output directory where the model predictions and checkpoints will be written.''' , )
# Other parameters
parser.add_argument(
'''--config_name''' , default='''''' , type=lowercase_ , help='''Pretrained config name or path if not the same as model_name_or_path''' , )
parser.add_argument(
'''--tokenizer_name''' , default='''''' , type=lowercase_ , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , )
parser.add_argument(
'''--cache_dir''' , default=lowercase_ , type=lowercase_ , help='''Where do you want to store the pre-trained models downloaded from s3''' , )
parser.add_argument(
'''--data_subset''' , type=lowercase_ , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''' )
parser.add_argument(
'''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''' )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
parser.add_argument(
'''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''' )
parser.add_argument(
'''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , )
parser.add_argument(
'''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''' )
parser.add_argument(
'''--masking_threshold''' , default=0.9 , type=lowercase_ , help='''masking threshold in terms of metrics (stop masking when metric < threshold * original metric value).''' , )
parser.add_argument(
'''--masking_amount''' , default=0.1 , type=lowercase_ , help='''Fraction of heads to mask at each masking step.''' )
parser.add_argument('''--metric_name''' , default='''acc''' , type=lowercase_ , help='''Metric to use for head masking.''' )
parser.add_argument(
'''--max_seq_length''' , default=1_28 , type=lowercase_ , help=(
'''The maximum total input sequence length after WordPiece tokenization. \n'''
'''Sequences longer than this will be truncated, sequences shorter padded.'''
) , )
parser.add_argument('''--batch_size''' , default=1 , type=lowercase_ , help='''Batch size.''' )
parser.add_argument('''--seed''' , type=lowercase_ , default=42 )
parser.add_argument('''--local_rank''' , type=lowercase_ , default=-1 , help='''local_rank for distributed training on gpus''' )
parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''' )
parser.add_argument('''--server_ip''' , type=lowercase_ , default='''''' , help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''' , type=lowercase_ , default='''''' , help='''Can be used for distant debugging.''' )
_lowerCamelCase = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=lowercase_ )
ptvsd.wait_for_attach()
# Set up devices and distributed training
if args.local_rank == -1 or args.no_cuda:
_lowerCamelCase = torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' )
_lowerCamelCase = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
_lowerCamelCase = torch.device('''cuda''' , args.local_rank )
_lowerCamelCase = 1
torch.distributed.init_process_group(backend='''nccl''' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
_lowerCamelCase = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
_lowerCamelCase = nn.parallel.DistributedDataParallel(
lowercase_ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=lowercase_ )
elif args.n_gpu > 1:
_lowerCamelCase = nn.DataParallel(lowercase_ )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=lowercase_ )
torch.save(lowercase_ , os.path.join(args.output_dir , '''run_args.bin''' ) )
logger.info('''Training/evaluation parameters %s''' , lowercase_ )
# Prepare dataset
_lowerCamelCase = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
_lowerCamelCase = (torch.from_numpy(lowercase_ ),)
_lowerCamelCase = TensorDataset(*lowercase_ )
_lowerCamelCase = RandomSampler(lowercase_ )
_lowerCamelCase = DataLoader(lowercase_ , sampler=lowercase_ , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(lowercase_ , lowercase_ , lowercase_ )
# Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
_lowerCamelCase = mask_heads(lowercase_ , lowercase_ , lowercase_ )
prune_heads(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
if __name__ == "__main__":
main()
| 623 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''configuration_speecht5''': [
'''SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP''',
'''SpeechT5Config''',
'''SpeechT5HifiGanConfig''',
],
'''feature_extraction_speecht5''': ['''SpeechT5FeatureExtractor'''],
'''processing_speecht5''': ['''SpeechT5Processor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Tuple = ['''SpeechT5Tokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Any = [
'''SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SpeechT5ForSpeechToText''',
'''SpeechT5ForSpeechToSpeech''',
'''SpeechT5ForTextToSpeech''',
'''SpeechT5Model''',
'''SpeechT5PreTrainedModel''',
'''SpeechT5HifiGan''',
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
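# The file above defers all heavy imports through `_LazyModule`. A rough,
# self-contained sketch of the same idea using PEP 562 module-level __getattr__
# (an illustration of the pattern only, not the actual _LazyModule code; the
# stdlib modules used here stand in for the real submodules):
import importlib

_demo_import_structure = {'''math''': ['''sqrt'''], '''json''': ['''dumps''']}
_name_to_module = {name: mod for mod, names in _demo_import_structure.items() for name in names}

def __getattr__(name):
    # resolve and import the owning module only on first attribute access
    if name in _name_to_module:
        return getattr(importlib.import_module(_name_to_module[name]), name)
    raise AttributeError(F"""module {__name__!r} has no attribute {name!r}""")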
| 623 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=7 , lowerCamelCase__=3 , lowerCamelCase__=1_8 , lowerCamelCase__=3_0 , lowerCamelCase__=4_0_0 , lowerCamelCase__=True , lowerCamelCase__=None , lowerCamelCase__=True , ):
_lowerCamelCase = size if size is not None else {'''height''': 1_8, '''width''': 1_8}
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = num_channels
_lowerCamelCase = image_size
_lowerCamelCase = min_resolution
_lowerCamelCase = max_resolution
_lowerCamelCase = do_resize
_lowerCamelCase = size
_lowerCamelCase = apply_ocr
def snake_case__ ( self ):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class lowerCamelCase_( A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : List[str] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def snake_case__ ( self ):
_lowerCamelCase = LayoutLMvaImageProcessingTester(self )
@property
def snake_case__ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case__ ( self ):
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase__ , '''do_resize''' ) )
self.assertTrue(hasattr(lowerCamelCase__ , '''size''' ) )
self.assertTrue(hasattr(lowerCamelCase__ , '''apply_ocr''' ) )
def snake_case__ ( self ):
_lowerCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 1_8, '''width''': 1_8} )
_lowerCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
self.assertEqual(image_processor.size , {'''height''': 4_2, '''width''': 4_2} )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
# Initialize image_processing
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , Image.Image )
# Test not batched input
_lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
self.assertIsInstance(encoding.words , lowerCamelCase__ )
self.assertIsInstance(encoding.boxes , lowerCamelCase__ )
# Test batched
_lowerCamelCase = image_processing(lowerCamelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def snake_case__ ( self ):
# Initialize image_processing
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , numpify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , np.ndarray )
# Test not batched input
_lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
_lowerCamelCase = image_processing(lowerCamelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def snake_case__ ( self ):
# Initialize image_processing
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , torchify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , torch.Tensor )
# Test not batched input
_lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
_lowerCamelCase = image_processing(lowerCamelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def snake_case__ ( self ):
# with apply_OCR = True
_lowerCamelCase = LayoutLMvaImageProcessor()
from datasets import load_dataset
_lowerCamelCase = load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' )
_lowerCamelCase = Image.open(ds[0]['''file'''] ).convert('''RGB''' )
_lowerCamelCase = image_processing(lowerCamelCase__ , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
_lowerCamelCase = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
_lowerCamelCase = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 
5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , lowerCamelCase__ )
self.assertListEqual(encoding.boxes , lowerCamelCase__ )
# with apply_OCR = False
_lowerCamelCase = LayoutLMvaImageProcessor(apply_ocr=lowerCamelCase__ )
_lowerCamelCase = image_processing(lowerCamelCase__ , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) )
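# A short usage sketch of the processor class exercised above, with OCR disabled
# so it runs without a Tesseract install. The 224x224 default output size matches
# what the tests assert; the dummy image is an illustrative assumption.
import numpy as np
from PIL import Image
from transformers import LayoutLMv3ImageProcessor

dummy_image = Image.fromarray(np.zeros((4_0, 4_0, 3), dtype=np.uint8))  # blank RGB input
demo_processor = LayoutLMv3ImageProcessor(apply_ocr=False)
demo_pixel_values = demo_processor(dummy_image, return_tensors='''pt''').pixel_values
print(demo_pixel_values.shape)  # torch.Size([1, 3, 224, 224])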
| 623 |
"""simple docstring"""
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
__SCREAMING_SNAKE_CASE : List[str] = numpy.array([0, 0])
__SCREAMING_SNAKE_CASE : Optional[Any] = numpy.array([0.5, 0.866_0254])
__SCREAMING_SNAKE_CASE : Tuple = numpy.array([1, 0])
__SCREAMING_SNAKE_CASE : List[Any] = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def lowerCAmelCase_( lowercase_ : list[numpy.ndarray] , lowercase_ : int ) -> list[numpy.ndarray]:
_lowerCamelCase = initial_vectors
for _ in range(lowercase_ ):
_lowerCamelCase = iteration_step(lowercase_ )
return vectors
def lowerCAmelCase_( lowercase_ : list[numpy.ndarray] ) -> list[numpy.ndarray]:
_lowerCamelCase = []
for i, start_vector in enumerate(vectors[:-1] ):
_lowerCamelCase = vectors[i + 1]
new_vectors.append(lowercase_ )
_lowerCamelCase = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def lowerCAmelCase_( lowercase_ : numpy.ndarray , lowercase_ : float ) -> numpy.ndarray:
_lowerCamelCase = numpy.radians(lowercase_ )
_lowerCamelCase , _lowerCamelCase = numpy.cos(lowercase_ ), numpy.sin(lowercase_ )
_lowerCamelCase = numpy.array(((c, -s), (s, c)) )
return numpy.dot(lowercase_ , lowercase_ )
def lowerCAmelCase_( lowercase_ : list[numpy.ndarray] ) -> None:
_lowerCamelCase = plt.gca()
axes.set_aspect('''equal''' )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
_lowerCamelCase , _lowerCamelCase = zip(*lowercase_ )
plt.plot(lowercase_ , lowercase_ )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
__SCREAMING_SNAKE_CASE : str = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
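# Quick sanity check of the refinement above: every iteration replaces each
# segment with four, so k steps over the 4-point initial triangle should yield
# 3 * 4**k + 1 points. A small self-contained verification using the
# definitions from this file:
for k in range(4):
    assert len(iterate(INITIAL_VECTORS, k)) == 3 * 4**k + 1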
| 623 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
__SCREAMING_SNAKE_CASE : Optional[Any] = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
__SCREAMING_SNAKE_CASE : Optional[int] = 2_5_0_0_0_4
__SCREAMING_SNAKE_CASE : Optional[Any] = 2_5_0_0_2_0
@require_sentencepiece
@require_tokenizers
class lowerCamelCase_( A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Any = MBartaaTokenizer
lowercase__ : Union[str, Any] = MBartaaTokenizerFast
lowercase__ : Tuple = True
lowercase__ : Optional[Any] = True
def snake_case__ ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCamelCase = MBartaaTokenizer(lowerCamelCase__ , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=lowerCamelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case__ ( self ):
_lowerCamelCase = '''<s>'''
_lowerCamelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ) , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(lowerCamelCase__ ) , 1_0_5_4 )
def snake_case__ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_5_4 )
def snake_case__ ( self ):
_lowerCamelCase = MBartaaTokenizer(lowerCamelCase__ , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=lowerCamelCase__ )
_lowerCamelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowerCamelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
_lowerCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowerCamelCase__ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , )
_lowerCamelCase = tokenizer.convert_tokens_to_ids(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
_lowerCamelCase = tokenizer.convert_ids_to_tokens(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , )
@slow
def snake_case__ ( self ):
# fmt: off
_lowerCamelCase = {'''input_ids''': [[2_5_0_0_0_4, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [2_5_0_0_0_4, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_5_0_0_0_4, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase__ , model_name='''facebook/mbart-large-50''' , revision='''d3913889c59cd5c9e456b269c376325eabad57e2''' , )
def snake_case__ ( self ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
_lowerCamelCase = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart50''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_lowerCamelCase = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
_lowerCamelCase = self.tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
_lowerCamelCase = tempfile.mkdtemp()
_lowerCamelCase = tokenizer_r.save_pretrained(lowerCamelCase__ )
_lowerCamelCase = tokenizer_p.save_pretrained(lowerCamelCase__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
_lowerCamelCase = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(lowerCamelCase__ , lowerCamelCase__ )
# Checks everything loads correctly in the same way
_lowerCamelCase = tokenizer_r.from_pretrained(lowerCamelCase__ )
_lowerCamelCase = tokenizer_p.from_pretrained(lowerCamelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase__ , lowerCamelCase__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCamelCase__ )
# Save tokenizer rust, legacy_format=True
_lowerCamelCase = tempfile.mkdtemp()
_lowerCamelCase = tokenizer_r.save_pretrained(lowerCamelCase__ , legacy_format=lowerCamelCase__ )
_lowerCamelCase = tokenizer_p.save_pretrained(lowerCamelCase__ )
# Checks it save with the same files
self.assertSequenceEqual(lowerCamelCase__ , lowerCamelCase__ )
# Checks everything loads correctly in the same way
_lowerCamelCase = tokenizer_r.from_pretrained(lowerCamelCase__ )
_lowerCamelCase = tokenizer_p.from_pretrained(lowerCamelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase__ , lowerCamelCase__ ) )
shutil.rmtree(lowerCamelCase__ )
# Save tokenizer rust, legacy_format=False
_lowerCamelCase = tempfile.mkdtemp()
_lowerCamelCase = tokenizer_r.save_pretrained(lowerCamelCase__ , legacy_format=lowerCamelCase__ )
_lowerCamelCase = tokenizer_p.save_pretrained(lowerCamelCase__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_lowerCamelCase = tokenizer_r.from_pretrained(lowerCamelCase__ )
_lowerCamelCase = tokenizer_p.from_pretrained(lowerCamelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase__ , lowerCamelCase__ ) )
shutil.rmtree(lowerCamelCase__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
lowercase__ : List[str] = 'facebook/mbart-large-50-one-to-many-mmt'
lowercase__ : Union[str, Any] = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
lowercase__ : Optional[Any] = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
lowercase__ : Union[str, Any] = [EN_CODE, 8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2]
@classmethod
def snake_case__ ( cls ):
_lowerCamelCase = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
_lowerCamelCase = 1
return cls
def snake_case__ ( self ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 2_5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 2_5_0_0_0_4 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 2_5_0_0_2_0 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''mr_IN'''] , 2_5_0_0_3_8 )
def snake_case__ ( self ):
_lowerCamelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCamelCase__ )
def snake_case__ ( self ):
self.assertIn(lowerCamelCase__ , self.tokenizer.all_special_ids )
_lowerCamelCase = [RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2]
_lowerCamelCase = self.tokenizer.decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ )
_lowerCamelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertNotIn(self.tokenizer.eos_token , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = ['''this is gunna be a long sentence ''' * 2_0]
assert isinstance(src_text[0] , lowerCamelCase__ )
_lowerCamelCase = 1_0
_lowerCamelCase = self.tokenizer(lowerCamelCase__ , max_length=lowerCamelCase__ , truncation=lowerCamelCase__ ).input_ids[0]
self.assertEqual(ids[0] , lowerCamelCase__ )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(lowerCamelCase__ ) , lowerCamelCase__ )
def snake_case__ ( self ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [2_5_0_0_5_3, 2_5_0_0_0_1] )
def snake_case__ ( self ):
_lowerCamelCase = tempfile.mkdtemp()
_lowerCamelCase = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCamelCase__ )
_lowerCamelCase = MBartaaTokenizer.from_pretrained(lowerCamelCase__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCamelCase__ )
@require_torch
def snake_case__ ( self ):
_lowerCamelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCamelCase__ , return_tensors='''pt''' )
_lowerCamelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def snake_case__ ( self ):
_lowerCamelCase = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
_lowerCamelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual((2, 1_4) , batch.input_ids.shape )
self.assertEqual((2, 1_4) , batch.attention_mask.shape )
_lowerCamelCase = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowerCamelCase__ )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def snake_case__ ( self ):
_lowerCamelCase = self.tokenizer(self.src_text , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=3 , return_tensors='''pt''' )
_lowerCamelCase = self.tokenizer(
text_target=self.tgt_text , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=1_0 , return_tensors='''pt''' )
_lowerCamelCase = targets['''input_ids''']
_lowerCamelCase = shift_tokens_right(lowerCamelCase__ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def snake_case__ ( self ):
_lowerCamelCase = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
self.assertEqual(
nested_simplify(lowerCamelCase__ ) , {
# en_XX, A, test, EOS
'''input_ids''': [[2_5_0_0_0_4, 6_2, 3_0_3_4, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 2_5_0_0_0_1,
} , )
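# A hedged end-to-end sketch of the translation workflow these tests cover. The
# checkpoint is the public one named in the integration test class; the input
# sentence is made up for illustration.
from transformers import MBart50TokenizerFast, MBartForConditionalGeneration

demo_tokenizer = MBart50TokenizerFast.from_pretrained(
    '''facebook/mbart-large-50-one-to-many-mmt''', src_lang='''en_XX''')
demo_model = MBartForConditionalGeneration.from_pretrained('''facebook/mbart-large-50-one-to-many-mmt''')
demo_inputs = demo_tokenizer('''The weather is nice today.''', return_tensors='''pt''')
generated_ids = demo_model.generate(
    **demo_inputs, forced_bos_token_id=demo_tokenizer.lang_code_to_id['''ro_RO'''])  # force the target language
print(demo_tokenizer.batch_decode(generated_ids, skip_special_tokens=True))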
| 623 |
"""simple docstring"""
from typing import Any
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ ):
_lowerCamelCase = data
_lowerCamelCase = None
class lowerCamelCase_:
'''simple docstring'''
def __init__( self ):
_lowerCamelCase = None
def snake_case__ ( self ):
_lowerCamelCase = self.head
while temp is not None:
print(temp.data , end=''' ''' )
_lowerCamelCase = temp.next
print()
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = Node(lowerCamelCase__ )
_lowerCamelCase = self.head
_lowerCamelCase = new_node
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ):
if node_data_a == node_data_a:
return
else:
_lowerCamelCase = self.head
while node_a is not None and node_a.data != node_data_a:
_lowerCamelCase = node_a.next
_lowerCamelCase = self.head
while node_a is not None and node_a.data != node_data_a:
_lowerCamelCase = node_a.next
if node_a is None or node_a is None:
return
_lowerCamelCase , _lowerCamelCase = node_a.data, node_a.data
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[Any] = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('''After swapping''')
ll.print_list()
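# Note on the implementation above: swap_nodes exchanges the nodes' data fields
# rather than relinking `next` pointers, which avoids special cases for the head
# and for adjacent nodes at the cost of two O(n) scans. A tiny extra check:
demo_list = LinkedList()
for value in (3, 2, 1):
    demo_list.push(value)  # list is now 1 2 3
demo_list.swap_nodes(1, 3)
demo_list.print_list()  # prints: 3 2 1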
| 623 | 1 |
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 1_2_8,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 5_0,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 1_0,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 1_0,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
@is_staging_test
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
@classmethod
def snake_case__ ( cls ):
_lowerCamelCase = TOKEN
HfFolder.save_token(lowerCamelCase__ )
@classmethod
def snake_case__ ( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-config-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-config''' )
except HTTPError:
pass
def snake_case__ ( self ):
_lowerCamelCase = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub('''test-config''' , use_auth_token=self._token )
_lowerCamelCase = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase__ , repo_id='''test-config''' , push_to_hub=lowerCamelCase__ , use_auth_token=self._token )
_lowerCamelCase = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) )
def snake_case__ ( self ):
_lowerCamelCase = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub('''valid_org/test-config-org''' , use_auth_token=self._token )
_lowerCamelCase = BertConfig.from_pretrained('''valid_org/test-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase__ , repo_id='''valid_org/test-config-org''' , push_to_hub=lowerCamelCase__ , use_auth_token=self._token )
_lowerCamelCase = BertConfig.from_pretrained('''valid_org/test-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) )
def snake_case__ ( self ):
CustomConfig.register_for_auto_class()
_lowerCamelCase = CustomConfig(attribute=4_2 )
config.push_to_hub('''test-dynamic-config''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'''AutoConfig''': '''custom_configuration.CustomConfig'''} )
_lowerCamelCase = AutoConfig.from_pretrained(F"""{USER}/test-dynamic-config""" , trust_remote_code=lowerCamelCase__ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , '''CustomConfig''' )
self.assertEqual(new_config.attribute , 4_2 )
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self ):
_lowerCamelCase = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
_lowerCamelCase = c.n_embd + 1 # int
_lowerCamelCase = c.resid_pdrop + 1.0 # float
_lowerCamelCase = not c.scale_attn_weights # bool
_lowerCamelCase = c.summary_type + '''foo''' # str
c.update_from_string(
F"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""" )
self.assertEqual(lowerCamelCase__ , c.n_embd , '''mismatch for key: n_embd''' )
self.assertEqual(lowerCamelCase__ , c.resid_pdrop , '''mismatch for key: resid_pdrop''' )
self.assertEqual(lowerCamelCase__ , c.scale_attn_weights , '''mismatch for key: scale_attn_weights''' )
self.assertEqual(lowerCamelCase__ , c.summary_type , '''mismatch for key: summary_type''' )
def snake_case__ ( self ):
_lowerCamelCase = PretrainedConfig()
_lowerCamelCase = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
lowerCamelCase__ , ['''is_encoder_decoder''', '''_name_or_path''', '''_commit_hash''', '''transformers_version'''] )
_lowerCamelCase = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCamelCase__ , lowerCamelCase__ )]
if len(lowerCamelCase__ ) > 0:
raise ValueError(
'''The following keys are set with the default values in'''
''' `test_configuration_common.config_common_kwargs` pick another value for them:'''
F""" {", ".join(lowerCamelCase__ )}.""" )
def snake_case__ ( self ):
with self.assertRaises(lowerCamelCase__ ):
# config is in subfolder, the following should not work without specifying the subfolder
_lowerCamelCase = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' )
_lowerCamelCase = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' , subfolder='''bert''' )
self.assertIsNotNone(lowerCamelCase__ )
def snake_case__ ( self ):
# A mock response for an HTTP head request to emulate server down
_lowerCamelCase = mock.Mock()
_lowerCamelCase = 5_0_0
_lowerCamelCase = {}
_lowerCamelCase = HTTPError
_lowerCamelCase = {}
# Download this model to make sure it's in the cache.
_lowerCamelCase = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=lowerCamelCase__ ) as mock_head:
_lowerCamelCase = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# This check we did call the fake head request
mock_head.assert_called()
def snake_case__ ( self ):
# This test is for deprecated behavior and can be removed in v5
_lowerCamelCase = BertConfig.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json''' )
def snake_case__ ( self ):
_lowerCamelCase = AutoConfig.from_pretrained('''bert-base-cased''' )
_lowerCamelCase = ['''config.4.0.0.json''']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCamelCase__ )
_lowerCamelCase = 2
json.dump(configuration.to_dict() , open(os.path.join(lowerCamelCase__ , '''config.4.0.0.json''' ) , '''w''' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
_lowerCamelCase = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
_lowerCamelCase = ['''config.42.0.0.json''']
_lowerCamelCase = 7_6_8
configuration.save_pretrained(lowerCamelCase__ )
shutil.move(os.path.join(lowerCamelCase__ , '''config.4.0.0.json''' ) , os.path.join(lowerCamelCase__ , '''config.42.0.0.json''' ) )
_lowerCamelCase = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertEqual(new_configuration.hidden_size , 7_6_8 )
def snake_case__ ( self ):
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
_lowerCamelCase = '''hf-internal-testing/test-two-configs'''
import transformers as new_transformers
_lowerCamelCase = '''v4.0.0'''
_lowerCamelCase , _lowerCamelCase = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCamelCase__ , return_unused_kwargs=lowerCamelCase__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks that `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(lowerCamelCase__ , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
_lowerCamelCase = '''v3.0.0'''
_lowerCamelCase = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertEqual(old_configuration.hidden_size , 7_6_8 )
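# A small sketch of the config round-trip that several tests above rely on:
# `to_dict` / `from_dict` should preserve every user-set field. The values here
# are arbitrary illustrations.
from transformers import BertConfig

demo_config = BertConfig(hidden_size=3_2, num_hidden_layers=5)
restored_config = BertConfig.from_dict(demo_config.to_dict())
assert restored_config.hidden_size == 3_2 and restored_config.num_hidden_layers == 5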
| 623 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    # pytest discovers these hooks by their exact names, so they cannot be renamed
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('''--make-reports''')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
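# For completeness, a hedged sketch of how a test could consume the flag that
# pytest_addoption_shared registers above (the fixture below is illustrative and
# not part of this conftest):
import pytest

@pytest.fixture
def make_reports_option(request):
    # returns the value passed via `pytest --make-reports=<id>`, or None
    return request.config.getoption('''--make-reports''')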
| 623 | 1 |
"""simple docstring"""
import numpy as np
def lowerCAmelCase_( lowercase_ : np.array ) -> np.array:
return 1 / (1 + np.exp(-vector ))
def lowerCAmelCase_( lowercase_ : np.array ) -> np.array:
return vector * sigmoid(1.7_0_2 * vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
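# The 1.702-scaled sigmoid above approximates the exact GELU x * Phi(x), where
# Phi is the standard normal CDF. A quick self-contained numerical comparison
# (the sample points are arbitrary):
import math
import numpy as np

sample = np.linspace(-3, 3, 7)
exact_gelu = np.array([x * 0.5 * (1 + math.erf(x / math.sqrt(2))) for x in sample])
approx_gelu = sample * (1 / (1 + np.exp(-1.702 * sample)))
print(np.max(np.abs(exact_gelu - approx_gelu)))  # on the order of 1e-2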
| 623 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
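# The try/except above is the standard optional-dependency guard: when torch or
# transformers is missing, dummy objects are imported instead so the error only
# fires on use. A generic, self-contained sketch of the same pattern (all names
# are illustrative):
try:
    import torch  # noqa: F401
    _torch_available = True
except ImportError:
    _torch_available = False

if not _torch_available:
    class DemoPipeline:  # dummy stand-in: fails loudly at construction time
        def __init__(self, *args, **kwargs):
            raise ImportError('''DemoPipeline requires `torch`; please install it.''')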
| 623 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : int = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Tuple = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Dict = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 623 |
"""simple docstring"""
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=9_9 , lowerCamelCase__=1_3 , lowerCamelCase__=1_6 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=True , lowerCamelCase__=2 , lowerCamelCase__=3_2 , lowerCamelCase__=4 , lowerCamelCase__=4 , lowerCamelCase__=3_0 , lowerCamelCase__=0 , lowerCamelCase__=1 , lowerCamelCase__=2 , lowerCamelCase__=None , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = decoder_seq_length
# For common tests
_lowerCamelCase = self.decoder_seq_length
_lowerCamelCase = is_training
_lowerCamelCase = use_attention_mask
_lowerCamelCase = use_labels
_lowerCamelCase = vocab_size
_lowerCamelCase = d_model
_lowerCamelCase = d_model
_lowerCamelCase = decoder_layers
_lowerCamelCase = decoder_layers
_lowerCamelCase = decoder_ffn_dim
_lowerCamelCase = decoder_attention_heads
_lowerCamelCase = decoder_attention_heads
_lowerCamelCase = eos_token_id
_lowerCamelCase = bos_token_id
_lowerCamelCase = pad_token_id
_lowerCamelCase = decoder_start_token_id
_lowerCamelCase = use_cache
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = None
_lowerCamelCase = decoder_seq_length
_lowerCamelCase = 2
_lowerCamelCase = 1
def snake_case__ ( self ):
_lowerCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
_lowerCamelCase = None
if self.use_attention_mask:
_lowerCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
_lowerCamelCase = None
if self.use_labels:
_lowerCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
_lowerCamelCase = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ):
_lowerCamelCase = True
_lowerCamelCase = TrOCRDecoder(config=lowerCamelCase__ ).to(lowerCamelCase__ ).eval()
_lowerCamelCase = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
_lowerCamelCase = model(lowerCamelCase__ , use_cache=lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , use_cache=lowerCamelCase__ )
self.parent.assertTrue(len(lowerCamelCase__ ) == len(lowerCamelCase__ ) )
self.parent.assertTrue(len(lowerCamelCase__ ) == len(lowerCamelCase__ ) + 1 )
_lowerCamelCase = outputs['''past_key_values''']
# create hypothetical next token and extent to next_input_ids
_lowerCamelCase = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append to next input_ids and
_lowerCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
_lowerCamelCase = model(lowerCamelCase__ )['''last_hidden_state''']
_lowerCamelCase = model(lowerCamelCase__ , past_key_values=lowerCamelCase__ )['''last_hidden_state''']
# select random slice
_lowerCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowerCamelCase = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
_lowerCamelCase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 )
def snake_case__ ( self ):
_lowerCamelCase = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = config_and_inputs
_lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase_( A__, A__, A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : int = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
lowercase__ : List[str] = (TrOCRForCausalLM,) if is_torch_available() else ()
lowercase__ : Tuple = {'text-generation': TrOCRForCausalLM} if is_torch_available() else {}
lowercase__ : Dict = True
lowercase__ : Optional[Any] = False
def snake_case__ ( self ):
_lowerCamelCase = TrOCRStandaloneDecoderModelTester(self , is_training=lowerCamelCase__ )
_lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
self.config_tester.run_common_tests()
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*lowerCamelCase__ )
def snake_case__ ( self ):
return
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def snake_case__ ( self ):
pass
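# The cache test above verifies that a forward pass fed `past_key_values`
# reproduces the full forward pass. A hedged sketch of the greedy decoding loop
# this enables (the gpt2 checkpoint and prompt are illustrative; any causal LM
# with use_cache behaves the same way):
import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer

demo_tok = GPT2Tokenizer.from_pretrained('''gpt2''')
demo_lm = GPT2LMHeadModel.from_pretrained('''gpt2''').eval()
token_ids = demo_tok('''Hello''', return_tensors='''pt''').input_ids
past = None
for _ in range(5):  # greedy, one token at a time, reusing the cache
    out = demo_lm(token_ids if past is None else token_ids[:, -1:], past_key_values=past, use_cache=True)
    past = out.past_key_values
    next_token = out.logits[:, -1].argmax(dim=-1, keepdim=True)
    token_ids = torch.cat([token_ids, next_token], dim=-1)
print(demo_tok.decode(token_ids[0]))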
| 623 | 1 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class lowerCamelCase_:
'''simple docstring'''
lowercase__ : List[str]
lowercase__ : Optional[str] = None
# Automatically constructed
lowercase__ : ClassVar[str] = "dict"
lowercase__ : ClassVar[Any] = None
lowercase__ : str = field(default='Translation', init=A__, repr=A__ )
def __call__( self ):
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def snake_case__ ( self ):
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class lowerCamelCase_:
'''simple docstring'''
lowercase__ : Optional[List] = None
lowercase__ : Optional[int] = None
lowercase__ : Optional[str] = None
# Automatically constructed
lowercase__ : ClassVar[str] = "dict"
lowercase__ : ClassVar[Any] = None
lowercase__ : str = field(default='TranslationVariableLanguages', init=A__, repr=A__ )
def snake_case__ ( self ):
_lowerCamelCase = sorted(set(self.languages ) ) if self.languages else None
_lowerCamelCase = len(self.languages ) if self.languages else None
def __call__( self ):
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = set(self.languages )
if self.languages and set(lowerCamelCase__ ) - lang_set:
raise ValueError(
F"""Some languages in example ({", ".join(sorted(set(lowerCamelCase__ ) - lang_set ) )}) are not in valid set ({", ".join(lowerCamelCase__ )}).""" )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
_lowerCamelCase = []
for lang, text in translation_dict.items():
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
_lowerCamelCase , _lowerCamelCase = zip(*sorted(lowerCamelCase__ ) )
return {"language": languages, "translation": translations}
def snake_case__ ( self ):
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
}
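# The encode step above flattens a {lang: text-or-list} mapping into parallel,
# language-sorted columns. A tiny standalone illustration of that transformation:
demo_translation = {'''en''': '''the cat''', '''fr''': ['''le chat''', '''la chatte''']}
pairs = []
for lang, text in demo_translation.items():
    if isinstance(text, str):
        pairs.append((lang, text))
    else:
        pairs.extend((lang, el) for el in text)
languages, translations = zip(*sorted(pairs))
print(languages)     # ('en', 'fr', 'fr')
print(translations)  # ('the cat', 'la chatte', 'le chat')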
| 623 |
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
__SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
class lowerCamelCase_( A__ ):
'''simple docstring'''
    def __init__( self , args=None , **kwargs ):
        warnings.warn(
            '''`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '''
            '''instead.''' , FutureWarning , )
        super().__init__(args=args , **kwargs )
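    # Note: beyond emitting the FutureWarning above, this subclass adds nothing;
    # plain `Trainer` handles SageMaker environments, as the message says.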
| 623 | 1 |
"""simple docstring"""
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset( IterableDataset ):
'''simple docstring'''
    def __init__( self , tokenizer , dataset , seq_length=1_0_2_4 , num_of_sequences=1_0_2_4 , chars_per_token=3.6 ):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences
    def __iter__( self ):
        iterator = iter(self.dataset )
        more_examples = True
        while more_examples:
            buffer , buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator )['''content'''] )
                    buffer_len += len(buffer[-1] )
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer , truncation=False )['''input_ids''']
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id] )
            for i in range(0 , len(all_token_ids ) , self.seq_length ):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids ) == self.seq_length:
                    yield torch.tensor(input_ids )
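    # Buffering math: with the defaults above, input_characters =
    # 1024 * 3.6 * 1024 ≈ 3.8M characters per refill, i.e. enough text (at
    # ~3.6 chars per token) to cut roughly 1024 sequences of seq_length tokens.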
def create_dataloader( args ):
    ds_kwargs = {'''streaming''': True}
    valid_data = load_dataset(args.dataset_name , split='''train''' , **ds_kwargs )
    valid_dataset = ConstantLengthDataset(tokenizer , valid_data , seq_length=args.seq_length )
    eval_dataloader = DataLoader(valid_dataset , batch_size=args.batch_size )
    return eval_dataloader
def evaluate( args ):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader ):
        with torch.no_grad():
            outputs = model(batch , labels=batch )
        loss = outputs.loss.repeat(args.batch_size )
        losses.append(accelerator.gather(loss ) )
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses ) )
    try:
        perplexity = torch.exp(loss )
    except OverflowError:
        perplexity = float('''inf''' )
    return loss.item(), perplexity.item()
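# i.e. perplexity = exp(mean token-level cross-entropy); the try/except keeps the
# script alive if exponentiating a very large loss overflows.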
# Setup Accelerator
accelerator = Accelerator()
# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)
# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
eval_dataloader = create_dataloader(args)
# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('''Evaluating and saving model after training''')
eval_loss, perplexity = evaluate(args)
logger.info(F"""loss/eval: {eval_loss}, perplexity: {perplexity}""")
| 623 |
"""simple docstring"""
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester( unittest.TestCase ):
'''simple docstring'''
    def __init__( self , parent , batch_size=2 , seq_length=5_6 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=2 , num_attention_heads=2 , intermediate_size=7 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.0_2 , num_choices=4 , attention_type="block_sparse" , use_bias=True , rescale_embeddings=False , block_size=2 , num_random_blocks=3 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = BigBirdConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''attention_mask''': attention_mask,
}
return config, inputs_dict
@require_flax
class lowerCamelCase_( A__, unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
    test_attn_probs = False
    test_mismatched_shapes = False
    def setUp( self ):
        self.model_tester = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self ):
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self ):
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self ):
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self ):
super().test_hidden_states_output()
@slow
def snake_case__ ( self ):
for model_class_name in self.all_model_classes:
_lowerCamelCase = model_class_name.from_pretrained('''google/bigbird-roberta-base''' )
self.assertIsNotNone(lowerCamelCase__ )
def snake_case__ ( self ):
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = model_class(lowerCamelCase__ )
@jax.jit
def model_jitted(lowerCamelCase__ , lowerCamelCase__=None , **lowerCamelCase__ ):
return model(input_ids=lowerCamelCase__ , attention_mask=lowerCamelCase__ , **lowerCamelCase__ )
with self.subTest('''JIT Enabled''' ):
_lowerCamelCase = model_jitted(**lowerCamelCase__ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
_lowerCamelCase = model_jitted(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) )
for jitted_output, output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=1e-5 , lowerCamelCase__="outputs" , lowerCamelCase__=None ):
# `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
# an effort was done to return `attention_probs` (yet to be verified).
if name.startswith('''outputs.attentions''' ):
return
else:
super().check_pt_flax_outputs(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
| 623 | 1 |
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 623 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase_( A__, A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Tuple = StableDiffusionXLImgaImgPipeline
lowercase__ : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
lowercase__ : int = PipelineTesterMixin.required_optional_params - {'latents'}
lowercase__ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowercase__ : Dict = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowercase__ : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def snake_case__ ( self ):
torch.manual_seed(0 )
_lowerCamelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=lowerCamelCase__ , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=8_0 , cross_attention_dim=6_4 , )
_lowerCamelCase = EulerDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , )
torch.manual_seed(0 )
_lowerCamelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
_lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=3_2 , )
_lowerCamelCase = CLIPTextModel(lowerCamelCase__ )
_lowerCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=lowerCamelCase__ )
_lowerCamelCase = CLIPTextModelWithProjection(lowerCamelCase__ )
_lowerCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=lowerCamelCase__ )
_lowerCamelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''text_encoder_2''': text_encoder_a,
'''tokenizer_2''': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__=0 ):
_lowerCamelCase = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
_lowerCamelCase = image / 2 + 0.5
if str(lowerCamelCase__ ).startswith('''mps''' ):
_lowerCamelCase = torch.manual_seed(lowerCamelCase__ )
else:
_lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
_lowerCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 5.0,
'''output_type''': '''numpy''',
'''strength''': 0.7_5,
}
return inputs
def snake_case__ ( self ):
_lowerCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_lowerCamelCase = self.get_dummy_components()
_lowerCamelCase = StableDiffusionXLImgaImgPipeline(**lowerCamelCase__ )
_lowerCamelCase = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_lowerCamelCase = self.get_dummy_inputs(lowerCamelCase__ )
_lowerCamelCase = sd_pipe(**lowerCamelCase__ ).images
_lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
_lowerCamelCase = np.array([0.4_6_5_6, 0.4_8_4_0, 0.4_4_3_9, 0.6_6_9_8, 0.5_5_7_4, 0.4_5_2_4, 0.5_7_9_9, 0.5_9_4_3, 0.5_1_6_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case__ ( self ):
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def snake_case__ ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
_lowerCamelCase = self.get_dummy_components()
_lowerCamelCase = StableDiffusionXLImgaImgPipeline(**lowerCamelCase__ )
_lowerCamelCase = sd_pipe.to(lowerCamelCase__ )
_lowerCamelCase = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
# forward without prompt embeds
_lowerCamelCase = self.get_dummy_inputs(lowerCamelCase__ )
_lowerCamelCase = 3 * ['''this is a negative prompt''']
_lowerCamelCase = negative_prompt
_lowerCamelCase = 3 * [inputs['''prompt''']]
_lowerCamelCase = sd_pipe(**lowerCamelCase__ )
_lowerCamelCase = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
_lowerCamelCase = self.get_dummy_inputs(lowerCamelCase__ )
_lowerCamelCase = 3 * ['''this is a negative prompt''']
_lowerCamelCase = 3 * [inputs.pop('''prompt''' )]
(
(
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) ,
) = sd_pipe.encode_prompt(lowerCamelCase__ , negative_prompt=lowerCamelCase__ )
_lowerCamelCase = sd_pipe(
**lowerCamelCase__ , prompt_embeds=lowerCamelCase__ , negative_prompt_embeds=lowerCamelCase__ , pooled_prompt_embeds=lowerCamelCase__ , negative_pooled_prompt_embeds=lowerCamelCase__ , )
_lowerCamelCase = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__="cpu" , lowerCamelCase__=torch.floataa , lowerCamelCase__=0 ):
_lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
_lowerCamelCase = np.random.RandomState(lowerCamelCase__ ).standard_normal((1, 4, 6_4, 6_4) )
_lowerCamelCase = torch.from_numpy(lowerCamelCase__ ).to(device=lowerCamelCase__ , dtype=lowerCamelCase__ )
_lowerCamelCase = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def snake_case__ ( self ):
_lowerCamelCase = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''' )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_lowerCamelCase = self.get_inputs(lowerCamelCase__ )
_lowerCamelCase = pipe(**lowerCamelCase__ ).images
_lowerCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_lowerCamelCase = np.array([0.4_9_4_9_3, 0.4_7_8_9_6, 0.4_0_7_9_8, 0.5_4_2_1_4, 0.5_3_2_1_2, 0.4_8_2_0_2, 0.4_7_6_5_6, 0.4_6_3_2_9, 0.4_8_5_0_6] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
| 623 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_resnet''': ['''RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ResNetConfig''', '''ResNetOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_resnet'''] = [
'''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ResNetForImageClassification''',
'''ResNetModel''',
'''ResNetPreTrainedModel''',
'''ResNetBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_resnet'''] = [
'''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFResNetForImageClassification''',
'''TFResNetModel''',
'''TFResNetPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_resnet'''] = [
'''FlaxResNetForImageClassification''',
'''FlaxResNetModel''',
'''FlaxResNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 623 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_xlm'''] = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_xlm'''] = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 623 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''',
},
'''added_tokens.json''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''',
},
'''merges_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''RUCAIBox/mvp''': 1_0_2_4,
}
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : str = VOCAB_FILES_NAMES
lowercase__ : str = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : List[Any] = ['input_ids', 'attention_mask']
lowercase__ : Optional[int] = MvpTokenizer
def __init__( self , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__="replace" , lowerCamelCase__="<s>" , lowerCamelCase__="</s>" , lowerCamelCase__="</s>" , lowerCamelCase__="<s>" , lowerCamelCase__="<unk>" , lowerCamelCase__="<pad>" , lowerCamelCase__="<mask>" , lowerCamelCase__=False , lowerCamelCase__=True , **lowerCamelCase__ , ):
super().__init__(
lowerCamelCase__ , lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , errors=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ , **lowerCamelCase__ , )
_lowerCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , lowerCamelCase__ ) != add_prefix_space:
_lowerCamelCase = getattr(lowerCamelCase__ , pre_tok_state.pop('''type''' ) )
_lowerCamelCase = add_prefix_space
_lowerCamelCase = pre_tok_class(**lowerCamelCase__ )
_lowerCamelCase = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_lowerCamelCase = '''post_processor'''
_lowerCamelCase = getattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ )
if tokenizer_component_instance:
_lowerCamelCase = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_lowerCamelCase = tuple(state['''sep'''] )
if "cls" in state:
_lowerCamelCase = tuple(state['''cls'''] )
_lowerCamelCase = False
if state.get('''add_prefix_space''' , lowerCamelCase__ ) != add_prefix_space:
_lowerCamelCase = add_prefix_space
_lowerCamelCase = True
if state.get('''trim_offsets''' , lowerCamelCase__ ) != trim_offsets:
_lowerCamelCase = trim_offsets
_lowerCamelCase = True
if changes_to_apply:
_lowerCamelCase = getattr(lowerCamelCase__ , state.pop('''type''' ) )
_lowerCamelCase = component_class(**lowerCamelCase__ )
setattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ )
@property
    def mask_token( self ):
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
    def mask_token( self , lowerCamelCase__ ):
_lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else value
_lowerCamelCase = value
    def _batch_encode_plus( self , *args , **kwargs ):
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                '''to use it with pretokenized inputs.''' )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self , *args , **kwargs ):
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                '''to use it with pretokenized inputs.''' )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
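    # Layout produced above (BART/RoBERTa style, inferred from the token ids used):
    #   single sequence: <s> A </s>
    #   pair of sequences: <s> A </s> </s> B </s>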
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
| 623 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list( shape , scale=1.0 , rng=None , name=None ):
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
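# e.g. floats_list((2, 3)) -> a 2x3 nested list of floats drawn from [0, scale)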
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester( unittest.TestCase ):
'''simple docstring'''
    def __init__( self , parent , batch_size=7 , min_seq_length=4_0_0 , max_seq_length=2_0_0_0 , feature_size=1_0 , hop_length=1_6_0 , chunk_length=8 , padding_value=0.0 , sampling_rate=4_0_0_0 , return_attention_mask=False , do_normalize=True , ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict( self ):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class lowerCamelCase_( A__, unittest.TestCase ):
'''simple docstring'''
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None
    def setUp( self ):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self )
def snake_case__ ( self ):
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase = feat_extract_first.save_pretrained(lowerCamelCase__ )[0]
check_json_file_has_correct_format(lowerCamelCase__ )
_lowerCamelCase = self.feature_extraction_class.from_pretrained(lowerCamelCase__ )
_lowerCamelCase = feat_extract_first.to_dict()
_lowerCamelCase = feat_extract_second.to_dict()
_lowerCamelCase = feat_extract_first.mel_filters
_lowerCamelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase = os.path.join(lowerCamelCase__ , '''feat_extract.json''' )
feat_extract_first.to_json_file(lowerCamelCase__ )
_lowerCamelCase = self.feature_extraction_class.from_json_file(lowerCamelCase__ )
_lowerCamelCase = feat_extract_first.to_dict()
_lowerCamelCase = feat_extract_second.to_dict()
_lowerCamelCase = feat_extract_first.mel_filters
_lowerCamelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
# Tests that all call wrap to encode_plus and batch_encode_plus
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_lowerCamelCase = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs]
# Test feature size
_lowerCamelCase = feature_extractor(lowerCamelCase__ , padding='''max_length''' , return_tensors='''np''' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_lowerCamelCase = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features
_lowerCamelCase = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
# Test batched
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_lowerCamelCase = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
_lowerCamelCase = np.asarray(lowerCamelCase__ )
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
# Test truncation required
_lowerCamelCase = [floats_list((1, x) )[0] for x in range(2_0_0 , (feature_extractor.n_samples + 5_0_0) , 2_0_0 )]
_lowerCamelCase = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs]
_lowerCamelCase = [x[: feature_extractor.n_samples] for x in speech_inputs]
_lowerCamelCase = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs_truncated]
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
def snake_case__ ( self ):
import torch
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCamelCase = np.random.rand(1_0_0 , 3_2 ).astype(np.floataa )
_lowerCamelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_lowerCamelCase = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
_lowerCamelCase = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
    def _load_datasamples( self , num_samples ):
        ds = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
        # automatic decoding with librispeech
        speech_samples = ds.sort('''id''' ).select(range(num_samples ) )[:num_samples]['''audio''']
        return [x["array"] for x in speech_samples]
def snake_case__ ( self ):
# fmt: off
_lowerCamelCase = torch.tensor(
[
0.1_1_9_3, -0.0_9_4_6, -0.1_0_9_8, -0.0_1_9_6, 0.0_2_2_5, -0.0_6_9_0, -0.1_7_3_6, 0.0_9_5_1,
0.0_9_7_1, -0.0_8_1_7, -0.0_7_0_2, 0.0_1_6_2, 0.0_2_6_0, 0.0_0_1_7, -0.0_1_9_2, -0.1_6_7_8,
0.0_7_0_9, -0.1_8_6_7, -0.0_6_5_5, -0.0_2_7_4, -0.0_2_3_4, -0.1_8_8_4, -0.0_5_1_6, -0.0_5_5_4,
-0.0_2_7_4, -0.1_4_2_5, -0.1_4_2_3, 0.0_8_3_7, 0.0_3_7_7, -0.0_8_5_4
] )
# fmt: on
_lowerCamelCase = self._load_datasamples(1 )
_lowerCamelCase = WhisperFeatureExtractor()
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''pt''' ).input_features
self.assertEqual(input_features.shape , (1, 8_0, 3_0_0_0) )
self.assertTrue(torch.allclose(input_features[0, 0, :3_0] , lowerCamelCase__ , atol=1e-4 ) )
def snake_case__ ( self ):
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCamelCase = self._load_datasamples(1 )[0]
_lowerCamelCase = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5_5_3_5 # Rescale to [0, 65535] to show issue
_lowerCamelCase = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=lowerCamelCase__ )[0]
self.assertTrue(np.all(np.mean(lowerCamelCase__ ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCamelCase__ ) - 1 ) < 1e-3 ) )
| 623 | 1 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)
@dataclass
class lowerCamelCase_( A__ ):
'''simple docstring'''
    deprecated_args = [
'no_inference',
'no_cuda',
'no_tpu',
'no_speed',
'no_memory',
'no_env_print',
'no_multi_process',
]
    def __init__( self , **kwargs ):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg )
                logger.warning(
                    F"""{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"""
                    F""" {positive_arg}={kwargs[positive_arg]}""" )
        self.tpu_name = kwargs.pop('''tpu_name''' , self.tpu_name )
        self.device_idx = kwargs.pop('''device_idx''' , self.device_idx )
        self.eager_mode = kwargs.pop('''eager_mode''' , self.eager_mode )
        self.use_xla = kwargs.pop('''use_xla''' , self.use_xla )
        super().__init__(**kwargs )
    tpu_name: str = field(
        default=None, metadata={'help': 'Name of TPU'}, )
    device_idx: int = field(
        default=0, metadata={'help': 'CPU / GPU device index. Defaults to 0.'}, )
    eager_mode: bool = field(default=False, metadata={'help': 'Benchmark models in eager model.'} )
    use_xla: bool = field(
        default=False, metadata={
            'help': 'Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.'
        }, )
@cached_property
    def _setup_tpu( self ):
        requires_backends(self , ['''tf'''] )
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu
@cached_property
    def _setup_strategy( self ):
        requires_backends(self , ['''tf'''] )
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu )
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
            strategy = tf.distribute.TPUStrategy(self._setup_tpu )
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx] , '''GPU''' )
                strategy = tf.distribute.OneDeviceStrategy(device=F"""/gpu:{self.device_idx}""" )
            else:
                tf.config.set_visible_devices([] , '''GPU''' ) # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=F"""/cpu:{self.device_idx}""" )
        return strategy
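    # Device selection above follows the standard TF pattern: TPU -> TPUStrategy;
    # a single visible GPU -> OneDeviceStrategy('/gpu:<idx>'); otherwise all GPUs
    # are hidden and a CPU OneDeviceStrategy is used.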
@property
    def is_tpu( self ):
requires_backends(self , ['''tf'''] )
return self._setup_tpu is not None
@property
    def strategy( self ):
requires_backends(self , ['''tf'''] )
return self._setup_strategy
@property
    def gpu_list( self ):
requires_backends(self , ['''tf'''] )
return tf.config.list_physical_devices('''GPU''' )
@property
    def n_gpu( self ):
requires_backends(self , ['''tf'''] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
    def is_gpu( self ):
return self.n_gpu > 0
| 623 |
"""simple docstring"""
def abbr( a: str , b: str ) -> bool:
    n = len(a )
    m = len(b )
    dp = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
    dp[0][0] = True
    for i in range(n ):
        for j in range(m + 1 ):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
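# Worked examples for the DP above (`abbr` and the local names are reconstructed):
#   abbr("daBcd", "ABC") -> True  (capitalize 'a' and 'c', keep 'B', drop both 'd's)
#   abbr("dBcd", "ABC")  -> False (no 'a'/'A' is available to produce the leading 'A')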
if __name__ == "__main__":
import doctest
doctest.testmod()
| 623 | 1 |
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
'''simple docstring'''
    def __init__( self , parent , batch_size=1_3 , image_size=3_0 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=3_2 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=1_0 , initializer_range=0.0_2 , num_labels=3 , mask_ratio=0.6 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
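        # e.g. with the defaults: (30 // 2) ** 2 = 225 patches, and
        # ceil((1 - 0.6) * (225 + 1)) = ceil(90.4) = 91 kept tokens.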
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = TFViTMAEModel(config=config )
        result = model(pixel_values , training=False )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_pretraining( self , config , pixel_values , labels ):
        model = TFViTMAEForPreTraining(config )
        result = model(pixel_values , training=False )
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values , training=False )
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config , pixel_values , labels) = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_tf
class lowerCamelCase_( A__, A__, unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
lowercase__ : Dict = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {}
lowercase__ : Optional[Any] = False
lowercase__ : Union[str, Any] = False
lowercase__ : str = False
lowercase__ : List[str] = False
    def setUp( self ):
        self.model_tester = TFViTMAEModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTMAEConfig , has_text_modality=False , hidden_size=3_7 )
def snake_case__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
_lowerCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ , tf.keras.layers.Layer ) )
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase = [*signature.parameters.keys()]
_lowerCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCamelCase__ )
def snake_case__ ( self ):
# make the mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
_lowerCamelCase = copy.deepcopy(self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
_lowerCamelCase = model(**lowerCamelCase__ , noise=lowerCamelCase__ )
_lowerCamelCase = outputs_dict[0].numpy()
_lowerCamelCase = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 )
def snake_case__ ( self ):
# make the mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(lowerCamelCase__ ):
_lowerCamelCase = {}
for k, v in inputs_dict.items():
if tf.is_tensor(lowerCamelCase__ ):
_lowerCamelCase = v.numpy()
else:
_lowerCamelCase = np.array(lowerCamelCase__ )
return inputs_np_dict
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = prepare_numpy_arrays(lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
_lowerCamelCase = model(**lowerCamelCase__ , noise=lowerCamelCase__ )
self.assert_outputs_same(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
# make masks reproducible
np.random.seed(2 )
_lowerCamelCase = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_lowerCamelCase = tf.constant(lowerCamelCase__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
_lowerCamelCase = tf_noise
super().check_pt_tf_models(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
# make mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(lowerCamelCase__ )
if module_member_name.endswith('''MainLayer''' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )]
for module_member in (getattr(lowerCamelCase__ , lowerCamelCase__ ),)
if isinstance(lowerCamelCase__ , lowerCamelCase__ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(lowerCamelCase__ , '''_keras_serializable''' , lowerCamelCase__ )
}
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_lowerCamelCase = tf.convert_to_tensor(lowerCamelCase__ )
inputs_dict.update({'''noise''': noise} )
for main_layer_class in tf_main_layer_classes:
_lowerCamelCase = main_layer_class(lowerCamelCase__ )
_lowerCamelCase = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
_lowerCamelCase = tf.keras.Model(lowerCamelCase__ , outputs=main_layer(lowerCamelCase__ ) )
_lowerCamelCase = model(lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase = os.path.join(lowerCamelCase__ , '''keras_model.h5''' )
model.save(lowerCamelCase__ )
_lowerCamelCase = tf.keras.models.load_model(
lowerCamelCase__ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(lowerCamelCase__ , tf.keras.Model )
_lowerCamelCase = model(lowerCamelCase__ )
self.assert_outputs_same(lowerCamelCase__ , lowerCamelCase__ )
@slow
def snake_case__ ( self ):
# make mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
if model_class.__name__ == "TFViTMAEModel":
_lowerCamelCase = outputs.last_hidden_state.numpy()
_lowerCamelCase = 0
else:
_lowerCamelCase = outputs.logits.numpy()
_lowerCamelCase = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase__ , saved_model=lowerCamelCase__ )
_lowerCamelCase = model_class.from_pretrained(lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
if model_class.__name__ == "TFViTMAEModel":
_lowerCamelCase = after_outputs['''last_hidden_state'''].numpy()
_lowerCamelCase = 0
else:
_lowerCamelCase = after_outputs['''logits'''].numpy()
_lowerCamelCase = 0
_lowerCamelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCamelCase__ , 1e-5 )
def snake_case__ ( self ):
# make mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
_lowerCamelCase = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(lowerCamelCase__ )
_lowerCamelCase = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
_lowerCamelCase = model_class.from_config(model.config )
_lowerCamelCase = new_model(lowerCamelCase__ ) # Build model
new_model.set_weights(model.get_weights() )
_lowerCamelCase = new_model(lowerCamelCase__ , noise=lowerCamelCase__ )
self.assert_outputs_same(lowerCamelCase__ , lowerCamelCase__ )
@unittest.skip(
        reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.''' )
def snake_case__ ( self ):
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def snake_case__ ( self ):
pass
@slow
def snake_case__ ( self ):
_lowerCamelCase = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(lowerCamelCase__ )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_tf
@require_vision
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case__ ( self ):
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def snake_case__ ( self ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
_lowerCamelCase = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' )
_lowerCamelCase = self.default_image_processor
_lowerCamelCase = prepare_img()
_lowerCamelCase = image_processor(images=lowerCamelCase__ , return_tensors='''tf''' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
_lowerCamelCase = ViTMAEConfig()
_lowerCamelCase = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(1, num_patches) )
# forward pass
_lowerCamelCase = model(**lowerCamelCase__ , noise=lowerCamelCase__ )
# verify the logits
_lowerCamelCase = tf.convert_to_tensor([1, 1_9_6, 7_6_8] )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
_lowerCamelCase = tf.convert_to_tensor(
[[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
| 623 |
"""simple docstring"""
import numpy as np
def sigmoid(vector : np.array ) -> np.array:
return 1 / (1 + np.exp(-vector ))
def sigmoid_linear_unit(vector : np.array ) -> np.array:
return vector * sigmoid(1.7_0_2 * vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
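# Illustrative check (added; not in the original snippet). With the names
# restored above, sigmoid(0) is 0.5, so the sigmoid-weighted linear unit
# (the GELU approximation x * sigmoid(1.702 * x)) is 0 at x = 0:
# >>> float(sigmoid(np.array(0.0 ) ))
# 0.5
# >>> float(sigmoid_linear_unit(np.array(0.0 ) ))
# 0.0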
| 623 | 1 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def lowerCAmelCase_( lowercase_ : Dict , lowercase_ : str , lowercase_ : List[Any] , lowercase_ : List[Any] , lowercase_ : str=True , lowercase_ : Tuple="pt" ) -> Union[str, Any]:
_lowerCamelCase = {'''add_prefix_space''': True} if isinstance(lowercase_ , lowercase_ ) and not line.startswith(''' ''' ) else {}
_lowerCamelCase = padding_side
return tokenizer(
[line] , max_length=lowercase_ , padding='''max_length''' if pad_to_max_length else None , truncation=lowercase_ , return_tensors=lowercase_ , add_special_tokens=lowercase_ , **lowercase_ , )
def lowerCAmelCase_( lowercase_ : Tuple , lowercase_ : Any , lowercase_ : List[Any]=None , ) -> Union[str, Any]:
_lowerCamelCase = input_ids.ne(lowercase_ ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
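# Worked sketch (added for illustration; assumes pad_token_id = 0): for
# input_ids [[5, 6, 0], [7, 0, 0]], input_ids.ne(0).any(dim=0) gives the
# column mask [True, True, False], so the all-padding third column is dropped
# and the trimmed batch is [[5, 6], [7, 0]].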
class lowerCamelCase_( A__ ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="train" , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__="" , ):
super().__init__()
_lowerCamelCase = Path(lowerCamelCase__ ).joinpath(type_path + '''.source''' )
_lowerCamelCase = Path(lowerCamelCase__ ).joinpath(type_path + '''.target''' )
_lowerCamelCase = self.get_char_lens(self.src_file )
_lowerCamelCase = max_source_length
_lowerCamelCase = max_target_length
assert min(self.src_lens ) > 0, F"""found empty line in {self.src_file}"""
_lowerCamelCase = tokenizer
_lowerCamelCase = prefix
if n_obs is not None:
_lowerCamelCase = self.src_lens[:n_obs]
_lowerCamelCase = src_lang
_lowerCamelCase = tgt_lang
def __len__( self ):
return len(self.src_lens )
def __getitem__( self , lowerCamelCase__ ):
_lowerCamelCase = index + 1 # linecache starts at 1
_lowerCamelCase = self.prefix + linecache.getline(str(self.src_file ) , lowerCamelCase__ ).rstrip('''\n''' )
_lowerCamelCase = linecache.getline(str(self.tgt_file ) , lowerCamelCase__ ).rstrip('''\n''' )
assert source_line, F"""empty source line for index {index}"""
assert tgt_line, F"""empty tgt line for index {index}"""
# Need to add eos token manually for T5
if isinstance(self.tokenizer , lowerCamelCase__ ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
_lowerCamelCase = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , lowerCamelCase__ ) else self.tokenizer
)
_lowerCamelCase = self.tokenizer.generator if isinstance(self.tokenizer , lowerCamelCase__ ) else self.tokenizer
_lowerCamelCase = encode_line(lowerCamelCase__ , lowerCamelCase__ , self.max_source_length , '''right''' )
_lowerCamelCase = encode_line(lowerCamelCase__ , lowerCamelCase__ , self.max_target_length , '''right''' )
_lowerCamelCase = source_inputs['''input_ids'''].squeeze()
_lowerCamelCase = target_inputs['''input_ids'''].squeeze()
_lowerCamelCase = source_inputs['''attention_mask'''].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def snake_case__ ( lowerCamelCase__ ):
return [len(lowerCamelCase__ ) for x in Path(lowerCamelCase__ ).open().readlines()]
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = torch.stack([x['''input_ids'''] for x in batch] )
_lowerCamelCase = torch.stack([x['''attention_mask'''] for x in batch] )
_lowerCamelCase = torch.stack([x['''decoder_input_ids'''] for x in batch] )
_lowerCamelCase = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , lowerCamelCase__ )
else self.tokenizer.pad_token_id
)
_lowerCamelCase = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , lowerCamelCase__ )
else self.tokenizer.pad_token_id
)
_lowerCamelCase = trim_batch(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase , _lowerCamelCase = trim_batch(lowerCamelCase__ , lowerCamelCase__ , attention_mask=lowerCamelCase__ )
_lowerCamelCase = {
'''input_ids''': source_ids,
'''attention_mask''': source_mask,
'''decoder_input_ids''': y,
}
return batch
__SCREAMING_SNAKE_CASE : Union[str, Any] = getLogger(__name__)
def lowerCAmelCase_( lowercase_ : List[List] ) -> Any:
return list(itertools.chain.from_iterable(lowercase_ ) )
def lowerCAmelCase_( lowercase_ : str ) -> None:
_lowerCamelCase = get_git_info()
save_json(lowercase_ , os.path.join(lowercase_ , '''git_log.json''' ) )
def lowerCAmelCase_( lowercase_ : int , lowercase_ : Union[str, Any] , lowercase_ : Any=4 , **lowercase_ : Union[str, Any] ) -> Union[str, Any]:
with open(lowercase_ , '''w''' ) as f:
json.dump(lowercase_ , lowercase_ , indent=lowercase_ , **lowercase_ )
def lowerCAmelCase_( lowercase_ : Union[str, Any] ) -> List[str]:
with open(lowercase_ ) as f:
return json.load(lowercase_ )
def lowerCAmelCase_( ) -> Tuple:
_lowerCamelCase = git.Repo(search_parent_directories=lowercase_ )
_lowerCamelCase = {
'''repo_id''': str(lowercase_ ),
'''repo_sha''': str(repo.head.object.hexsha ),
'''repo_branch''': str(repo.active_branch ),
'''hostname''': str(socket.gethostname() ),
}
return repo_infos
def lowerCAmelCase_( lowercase_ : Callable , lowercase_ : Iterable ) -> List:
return list(map(lowercase_ , lowercase_ ) )
def lowerCAmelCase_( lowercase_ : List[Any] , lowercase_ : List[str] ) -> int:
with open(lowercase_ , '''wb''' ) as f:
return pickle.dump(lowercase_ , lowercase_ )
def normalize_answer(text : List[Any] ) -> List[Any]:
def remove_articles(text : str ):
return re.sub(r'''\b(a|an|the)\b''' , ''' ''' , text )
def white_space_fix(text : Dict ):
return " ".join(text.split() )
def remove_punc(text : Dict ):
exclude = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(text : Union[str, Any] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(text ) ) ) )
def lowerCAmelCase_( prediction : Optional[int] , ground_truth : List[str] ) -> List[str]:
prediction_tokens = normalize_answer(prediction ).split()
ground_truth_tokens = normalize_answer(ground_truth ).split()
common = Counter(prediction_tokens ) & Counter(ground_truth_tokens )
num_same = sum(common.values() )
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens )
recall = 1.0 * num_same / len(ground_truth_tokens )
fa = (2 * precision * recall) / (precision + recall)
return fa
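# Worked example (added): for prediction "cat sat down" and gold answer
# "the cat sat", normalization drops the article, the token overlap is
# {"cat", "sat"} (num_same = 2), precision = 2/3, recall = 2/2 = 1, and
# F1 = 2 * (2/3) * 1 / (2/3 + 1) = 0.8.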
def exact_match_score(prediction : Optional[int] , ground_truth : str ) -> List[Any]:
return normalize_answer(prediction ) == normalize_answer(ground_truth )
def lowerCAmelCase_( lowercase_ : List[str] , lowercase_ : List[str] ) -> Dict:
assert len(lowercase_ ) == len(lowercase_ )
_lowerCamelCase = 0
for hypo, pred in zip(lowercase_ , lowercase_ ):
em += exact_match_score(lowercase_ , lowercase_ )
if len(lowercase_ ) > 0:
em /= len(lowercase_ )
return {"em": em}
def lowerCAmelCase_( model_prefix : Union[str, Any] ) -> Optional[int]:
return model_prefix.startswith('''rag''' )
def lowerCAmelCase_( lowercase_ : Tuple , lowercase_ : Any , lowercase_ : str ) -> Tuple:
_lowerCamelCase = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
_lowerCamelCase = '''dropout_rate'''
for p in extra_params:
if getattr(lowercase_ , lowercase_ , lowercase_ ):
if not hasattr(lowercase_ , lowercase_ ) and not hasattr(lowercase_ , equivalent_param[p] ):
logger.info('''config doesn\'t have a `{}` attribute'''.format(lowercase_ ) )
delattr(lowercase_ , lowercase_ )
continue
_lowerCamelCase = p if hasattr(lowercase_ , lowercase_ ) else equivalent_param[p]
setattr(lowercase_ , lowercase_ , getattr(lowercase_ , lowercase_ ) )
delattr(lowercase_ , lowercase_ )
return hparams, config
| 623 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : str = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Dict = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[Any] = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
__SCREAMING_SNAKE_CASE : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 623 | 1 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Optional[int] = {'''vocab_file''': '''spiece.model'''}
__SCREAMING_SNAKE_CASE : Tuple = {
'''vocab_file''': {
'''TsinghuaAI/CPM-Generate''': '''https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model''',
}
}
class lowerCamelCase_( A__ ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=False , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__="<s>" , lowerCamelCase__="</s>" , lowerCamelCase__="<unk>" , lowerCamelCase__="<sep>" , lowerCamelCase__="<pad>" , lowerCamelCase__="<cls>" , lowerCamelCase__="<mask>" , lowerCamelCase__=["<eop>", "<eod>"] , lowerCamelCase__ = None , **lowerCamelCase__ , ):
_lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token
_lowerCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=lowerCamelCase__ , remove_space=lowerCamelCase__ , keep_accents=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , additional_special_tokens=lowerCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase__ , )
_lowerCamelCase = 3
_lowerCamelCase = do_lower_case
_lowerCamelCase = remove_space
_lowerCamelCase = keep_accents
_lowerCamelCase = vocab_file
_lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCamelCase__ )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
'''You need to install jieba to use CpmTokenizer or CpmTokenizerFast. '''
'''See https://pypi.org/project/jieba/ for installation.''' )
_lowerCamelCase = jieba
_lowerCamelCase = str.maketrans(''' \n''' , '''\u2582\u2583''' )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def snake_case__ ( self ):
return len(self.sp_model )
def snake_case__ ( self ):
_lowerCamelCase = {self.convert_ids_to_tokens(lowerCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
_lowerCamelCase = self.__dict__.copy()
_lowerCamelCase = None
return state
def __setstate__( self , lowerCamelCase__ ):
_lowerCamelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_lowerCamelCase = {}
_lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case__ ( self , lowerCamelCase__ ):
if self.remove_space:
_lowerCamelCase = ''' '''.join(inputs.strip().split() )
else:
_lowerCamelCase = inputs
_lowerCamelCase = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
if not self.keep_accents:
_lowerCamelCase = unicodedata.normalize('''NFKD''' , lowerCamelCase__ )
_lowerCamelCase = ''''''.join([c for c in outputs if not unicodedata.combining(lowerCamelCase__ )] )
if self.do_lower_case:
_lowerCamelCase = outputs.lower()
return outputs
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = self.preprocess_text(lowerCamelCase__ )
_lowerCamelCase = self.sp_model.encode(lowerCamelCase__ , out_type=lowerCamelCase__ )
_lowerCamelCase = []
for piece in pieces:
if len(lowerCamelCase__ ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
_lowerCamelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowerCamelCase__ , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_lowerCamelCase = cur_pieces[1:]
else:
_lowerCamelCase = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(lowerCamelCase__ )
else:
new_pieces.append(lowerCamelCase__ )
return new_pieces
def snake_case__ ( self , lowerCamelCase__ ):
return self.sp_model.PieceToId(lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ ):
return self.sp_model.IdToPiece(lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = ''''''.join(lowerCamelCase__ ).replace(lowerCamelCase__ , ''' ''' ).strip()
return out_string
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
_lowerCamelCase = [self.sep_token_id]
_lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ )
if token_ids_a is not None:
return ([0] * len(lowerCamelCase__ )) + [1] + ([0] * len(lowerCamelCase__ )) + [1, 1]
return ([0] * len(lowerCamelCase__ )) + [1, 1]
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
_lowerCamelCase = [self.sep_token_id]
_lowerCamelCase = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
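# Worked example (added): for a first sequence of two ids, say [5, 6], and no
# second sequence, sep contributes one id, so the result is
# len([5, 6] + sep) * [0] + [2] = [0, 0, 0, 2] -- XLNet-style token type ids
# with the CLS segment id (2) in last position.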
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
if not os.path.isdir(lowerCamelCase__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCamelCase = os.path.join(
lowerCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase__ , '''wb''' ) as fi:
_lowerCamelCase = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase__ )
return (out_vocab_file,)
def snake_case__ ( self , *lowerCamelCase__ , **lowerCamelCase__ ):
_lowerCamelCase = super()._decode(*lowerCamelCase__ , **lowerCamelCase__ )
_lowerCamelCase = text.replace(''' ''' , '''''' ).replace('''\u2582''' , ''' ''' ).replace('''\u2583''' , '''\n''' )
return text
| 623 |
"""simple docstring"""
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance : float , reactance : float , impedance : float ) -> dict[str, float]:
if (resistance, reactance, impedance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if resistance == 0:
return {"resistance": sqrt(pow(impedance , 2 ) - pow(reactance , 2 ) )}
elif reactance == 0:
return {"reactance": sqrt(pow(impedance , 2 ) - pow(resistance , 2 ) )}
elif impedance == 0:
return {"impedance": sqrt(pow(resistance , 2 ) + pow(reactance , 2 ) )}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
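# Illustrative call (added; uses the parameter names restored above and the
# classic 3-4-5 impedance triangle Z**2 = R**2 + X**2):
# >>> electrical_impedance(3, 4, 0)
# {'impedance': 5.0}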
| 623 | 1 |
"""simple docstring"""
def molarity_to_normality(nfactor : int , moles : float , volume : float ) -> float:
return round(float(moles / volume ) * nfactor )
def moles_to_pressure(volume : float , moles : float , temperature : float ) -> float:
return round(float((moles * 0.0_8_2_1 * temperature) / (volume) ) )
def moles_to_volume(pressure : float , moles : float , temperature : float ) -> float:
return round(float((moles * 0.0_8_2_1 * temperature) / (pressure) ) )
def pressure_and_volume_to_temperature(pressure : float , volume : float , moles : float ) -> float:
return round(float((pressure * volume) / (0.0_8_2_1 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
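# Worked example (added): for 3 moles in a 0.82 L vessel at 300 K, the ideal
# gas law gives moles_to_pressure(0.82, 3, 300)
# = round((3 * 0.0821 * 300) / 0.82) = round(90.1...) = 90 (atm).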
| 623 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence : list[Any] ) -> None:
create_state_space_tree(sequence , [] , 0 )
def create_state_space_tree(sequence : list[Any] , current_subsequence : list[Any] , index : int ) -> None:
if index == len(sequence ):
print(current_subsequence )
return
create_state_space_tree(sequence , current_subsequence , index + 1 )
current_subsequence.append(sequence[index] )
create_state_space_tree(sequence , current_subsequence , index + 1 )
current_subsequence.pop()
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['''A''', '''B''', '''C'''])
generate_all_subsequences(seq)
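# Illustrative trace (added): generate_all_subsequences([1, 2]) explores the
# exclude-branch first at every index, so it prints [], [2], [1], [1, 2].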
| 623 | 1 |
"""simple docstring"""
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'''huggingface/autoformer-tourism-monthly''': '''https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json''',
}
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : str = 'autoformer'
lowercase__ : List[Any] = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = "student_t" , lowerCamelCase__ = "nll" , lowerCamelCase__ = 1 , lowerCamelCase__ = [1, 2, 3, 4, 5, 6, 7] , lowerCamelCase__ = True , lowerCamelCase__ = 0 , lowerCamelCase__ = 0 , lowerCamelCase__ = 0 , lowerCamelCase__ = 0 , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = 6_4 , lowerCamelCase__ = 2 , lowerCamelCase__ = 2 , lowerCamelCase__ = 2 , lowerCamelCase__ = 2 , lowerCamelCase__ = 3_2 , lowerCamelCase__ = 3_2 , lowerCamelCase__ = "gelu" , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 1_0_0 , lowerCamelCase__ = 0.0_2 , lowerCamelCase__ = True , lowerCamelCase__=True , lowerCamelCase__ = 1_0 , lowerCamelCase__ = 2_5 , lowerCamelCase__ = 3 , **lowerCamelCase__ , ):
# time series specific configuration
_lowerCamelCase = prediction_length
_lowerCamelCase = context_length if context_length is not None else prediction_length
_lowerCamelCase = distribution_output
_lowerCamelCase = loss
_lowerCamelCase = input_size
_lowerCamelCase = num_time_features
_lowerCamelCase = lags_sequence
_lowerCamelCase = scaling
_lowerCamelCase = num_dynamic_real_features
_lowerCamelCase = num_static_real_features
_lowerCamelCase = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(lowerCamelCase__ ) != num_static_categorical_features:
raise ValueError(
'''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
_lowerCamelCase = cardinality
else:
_lowerCamelCase = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(lowerCamelCase__ ) != num_static_categorical_features:
raise ValueError(
'''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
_lowerCamelCase = embedding_dimension
else:
_lowerCamelCase = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
_lowerCamelCase = num_parallel_samples
# Transformer architecture configuration
_lowerCamelCase = input_size * len(self.lags_sequence ) + self._number_of_features
_lowerCamelCase = d_model
_lowerCamelCase = encoder_attention_heads
_lowerCamelCase = decoder_attention_heads
_lowerCamelCase = encoder_ffn_dim
_lowerCamelCase = decoder_ffn_dim
_lowerCamelCase = encoder_layers
_lowerCamelCase = decoder_layers
_lowerCamelCase = dropout
_lowerCamelCase = attention_dropout
_lowerCamelCase = activation_dropout
_lowerCamelCase = encoder_layerdrop
_lowerCamelCase = decoder_layerdrop
_lowerCamelCase = activation_function
_lowerCamelCase = init_std
_lowerCamelCase = use_cache
# Autoformer
_lowerCamelCase = label_length
_lowerCamelCase = moving_average
_lowerCamelCase = autocorrelation_factor
super().__init__(is_encoder_decoder=lowerCamelCase__ , **lowerCamelCase__ )
@property
def snake_case__ ( self ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
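# Worked example (added; relies on this config's default values, so treat the
# numbers as an assumption): with input_size = 1, no time/static/dynamic
# features and cardinality [0], embedding_dimension is [min(50, (0 + 1) // 2)]
# = [0], so _number_of_features = 0 + 0 + 0 + 0 + 1 * 2 = 2 and
# feature_size = 1 * len(lags_sequence) + 2 = 7 + 2 = 9.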
| 623 |
"""simple docstring"""
import warnings
from .generation import TFGenerationMixin
class lowerCamelCase_( A__ ):
'''simple docstring'''
warnings.warn(
'Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will '
'be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.', A__, )
| 623 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__SCREAMING_SNAKE_CASE : List[Any] = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : int = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 623 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowerCAmelCase_( lowercase_ : int , lowercase_ : List[Any] , lowercase_ : List[Any] , lowercase_ : Tuple , lowercase_ : List[str] ) -> Dict:
# Load configuration defined in the metadata file
with open(lowercase_ ) as metadata_file:
_lowerCamelCase = json.load(lowercase_ )
_lowerCamelCase = LukeConfig(use_entity_aware_attention=lowercase_ , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
_lowerCamelCase = torch.load(lowercase_ , map_location='''cpu''' )
# Load the entity vocab file
_lowerCamelCase = load_entity_vocab(lowercase_ )
_lowerCamelCase = RobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
_lowerCamelCase = AddedToken('''<ent>''' , lstrip=lowercase_ , rstrip=lowercase_ )
_lowerCamelCase = AddedToken('''<ent2>''' , lstrip=lowercase_ , rstrip=lowercase_ )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(lowercase_ )
with open(os.path.join(lowercase_ , LukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(lowercase_ , lowercase_ )
_lowerCamelCase = LukeTokenizer.from_pretrained(lowercase_ )
# Initialize the embeddings of the special tokens
_lowerCamelCase = state_dict['''embeddings.word_embeddings.weight''']
_lowerCamelCase = word_emb[tokenizer.convert_tokens_to_ids(['''@'''] )[0]].unsqueeze(0 )
_lowerCamelCase = word_emb[tokenizer.convert_tokens_to_ids(['''#'''] )[0]].unsqueeze(0 )
_lowerCamelCase = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_lowerCamelCase = F"""encoder.layer.{layer_index}.attention.self."""
_lowerCamelCase = state_dict[prefix + matrix_name]
_lowerCamelCase = state_dict[prefix + matrix_name]
_lowerCamelCase = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_lowerCamelCase = state_dict['''entity_embeddings.entity_embeddings.weight''']
_lowerCamelCase = entity_emb[entity_vocab['''[MASK]''']]
_lowerCamelCase = LukeModel(config=lowercase_ ).eval()
_lowerCamelCase , _lowerCamelCase = model.load_state_dict(lowercase_ , strict=lowercase_ )
if not (len(lowercase_ ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(F"""Missing keys {", ".join(lowercase_ )}. Expected only missing embeddings.position_ids""" )
if not (all(key.startswith('''entity_predictions''' ) or key.startswith('''lm_head''' ) for key in unexpected_keys )):
raise ValueError(
'''Unexpected keys'''
F""" {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}""" )
# Check outputs
_lowerCamelCase = LukeTokenizer.from_pretrained(lowercase_ , task='''entity_classification''' )
_lowerCamelCase = (
'''Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'''
''' new world number one avoid a humiliating second- round exit at Wimbledon .'''
)
_lowerCamelCase = (39, 42)
_lowerCamelCase = tokenizer(lowercase_ , entity_spans=[span] , add_prefix_space=lowercase_ , return_tensors='''pt''' )
_lowerCamelCase = model(**lowercase_ )
# Verify word hidden states
if model_size == "large":
_lowerCamelCase = torch.Size((1, 42, 10_24) )
_lowerCamelCase = torch.tensor(
[[0.0_1_3_3, 0.0_8_6_5, 0.0_0_9_5], [0.3_0_9_3, -0.2_5_7_6, -0.7_4_1_8], [-0.1_7_2_0, -0.2_1_1_7, -0.2_8_6_9]] )
else: # base
_lowerCamelCase = torch.Size((1, 42, 7_68) )
_lowerCamelCase = torch.tensor([[0.0_0_3_7, 0.1_3_6_8, -0.0_0_9_1], [0.1_0_9_9, 0.3_3_2_9, -0.1_0_9_5], [0.0_7_6_5, 0.5_3_3_5, 0.1_1_7_9]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase_ , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
_lowerCamelCase = torch.Size((1, 1, 10_24) )
_lowerCamelCase = torch.tensor([[0.0_4_6_6, -0.0_1_0_6, -0.0_1_7_9]] )
else: # base
_lowerCamelCase = torch.Size((1, 1, 7_68) )
_lowerCamelCase = torch.tensor([[0.1_4_5_7, 0.1_0_4_4, 0.0_1_7_4]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowercase_ , atol=1e-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(lowercase_ ) )
model.save_pretrained(lowercase_ )
def lowerCAmelCase_( lowercase_ : Optional[Any] ) -> Any:
_lowerCamelCase = {}
with open(lowercase_ , '''r''' , encoding='''utf-8''' ) as f:
for index, line in enumerate(lowercase_ ):
_lowerCamelCase , _lowerCamelCase = line.rstrip().split('''\t''' )
_lowerCamelCase = index
return entity_vocab
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
__SCREAMING_SNAKE_CASE : int = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 623 | 1 |
"""simple docstring"""
from __future__ import annotations
def mean(nums : list ) -> float:
if not nums:
raise ValueError('''List is empty''' )
return sum(nums ) / len(nums )
if __name__ == "__main__":
import doctest
doctest.testmod()
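# Illustrative call (added; `mean` is the name restored above):
# >>> mean([3, 6, 9])
# 6.0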
| 623 |
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=1_3 , lowerCamelCase__=3_0 , lowerCamelCase__=2 , lowerCamelCase__=3 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=3_2 , lowerCamelCase__=2 , lowerCamelCase__=4 , lowerCamelCase__=3_7 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=1_0 , lowerCamelCase__=0.0_2 , lowerCamelCase__=3 , lowerCamelCase__=0.6 , lowerCamelCase__=None , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = image_size
_lowerCamelCase = patch_size
_lowerCamelCase = num_channels
_lowerCamelCase = is_training
_lowerCamelCase = use_labels
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = type_sequence_label_size
_lowerCamelCase = initializer_range
_lowerCamelCase = mask_ratio
_lowerCamelCase = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
_lowerCamelCase = (image_size // patch_size) ** 2
_lowerCamelCase = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
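# Worked example (added): with the tester defaults image_size = 30,
# patch_size = 2 and mask_ratio = 0.6, num_patches = (30 // 2) ** 2 = 225 and
# the expected sequence length is ceil((1 - 0.6) * (225 + 1)) = ceil(90.4) = 91.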
def snake_case__ ( self ):
_lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase = None
if self.use_labels:
_lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase = self.get_config()
return config, pixel_values, labels
def snake_case__ ( self ):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = TFViTMAEModel(config=lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , training=lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = TFViTMAEForPreTraining(lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , training=lowerCamelCase__ )
# expected sequence length = num_patches
_lowerCamelCase = (self.image_size // self.patch_size) ** 2
_lowerCamelCase = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
_lowerCamelCase = 1
_lowerCamelCase = TFViTMAEForPreTraining(lowerCamelCase__ )
_lowerCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase = model(lowerCamelCase__ , training=lowerCamelCase__ )
_lowerCamelCase = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def snake_case__ ( self ):
_lowerCamelCase = self.prepare_config_and_inputs()
((_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase)) = config_and_inputs
_lowerCamelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class lowerCamelCase_( A__, A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Optional[Any] = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
lowercase__ : Dict = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {}
lowercase__ : Optional[Any] = False
lowercase__ : Union[str, Any] = False
lowercase__ : str = False
lowercase__ : List[str] = False
def snake_case__ ( self ):
_lowerCamelCase = TFViTMAEModelTester(self )
_lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=3_7 )
def snake_case__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
_lowerCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ , tf.keras.layers.Layer ) )
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase = [*signature.parameters.keys()]
_lowerCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCamelCase__ )
def snake_case__ ( self ):
# make the mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
_lowerCamelCase = copy.deepcopy(self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
_lowerCamelCase = model(**lowerCamelCase__ , noise=lowerCamelCase__ )
_lowerCamelCase = outputs_dict[0].numpy()
_lowerCamelCase = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 )
def snake_case__ ( self ):
# make the mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(lowerCamelCase__ ):
_lowerCamelCase = {}
for k, v in inputs_dict.items():
if tf.is_tensor(lowerCamelCase__ ):
_lowerCamelCase = v.numpy()
else:
_lowerCamelCase = np.array(lowerCamelCase__ )
return inputs_np_dict
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = prepare_numpy_arrays(lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
_lowerCamelCase = model(**lowerCamelCase__ , noise=lowerCamelCase__ )
self.assert_outputs_same(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
# make masks reproducible
np.random.seed(2 )
_lowerCamelCase = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_lowerCamelCase = tf.constant(lowerCamelCase__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
_lowerCamelCase = tf_noise
super().check_pt_tf_models(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
# make mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(lowerCamelCase__ )
if module_member_name.endswith('''MainLayer''' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )]
for module_member in (getattr(lowerCamelCase__ , lowerCamelCase__ ),)
if isinstance(lowerCamelCase__ , lowerCamelCase__ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(lowerCamelCase__ , '''_keras_serializable''' , lowerCamelCase__ )
}
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_lowerCamelCase = tf.convert_to_tensor(lowerCamelCase__ )
inputs_dict.update({'''noise''': noise} )
for main_layer_class in tf_main_layer_classes:
_lowerCamelCase = main_layer_class(lowerCamelCase__ )
_lowerCamelCase = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
_lowerCamelCase = tf.keras.Model(lowerCamelCase__ , outputs=main_layer(lowerCamelCase__ ) )
_lowerCamelCase = model(lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase = os.path.join(lowerCamelCase__ , '''keras_model.h5''' )
model.save(lowerCamelCase__ )
_lowerCamelCase = tf.keras.models.load_model(
lowerCamelCase__ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(lowerCamelCase__ , tf.keras.Model )
_lowerCamelCase = model(lowerCamelCase__ )
self.assert_outputs_same(lowerCamelCase__ , lowerCamelCase__ )
@slow
def snake_case__ ( self ):
# make mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
if model_class.__name__ == "TFViTMAEModel":
out_a = outputs.last_hidden_state.numpy()
out_a[np.isnan(out_a )] = 0
else:
out_a = outputs.logits.numpy()
out_a[np.isnan(out_a )] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase__ , saved_model=lowerCamelCase__ )
_lowerCamelCase = model_class.from_pretrained(lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
if model_class.__name__ == "TFViTMAEModel":
out_b = after_outputs['''last_hidden_state'''].numpy()
out_b[np.isnan(out_b )] = 0
else:
out_b = after_outputs['''logits'''].numpy()
out_b[np.isnan(out_b )] = 0
max_diff = np.amax(np.abs(out_b - out_a ) )
self.assertLessEqual(max_diff , 1e-5 )
def snake_case__ ( self ):
# make mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
_lowerCamelCase = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(lowerCamelCase__ )
_lowerCamelCase = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
_lowerCamelCase = model_class.from_config(model.config )
_lowerCamelCase = new_model(lowerCamelCase__ ) # Build model
new_model.set_weights(model.get_weights() )
_lowerCamelCase = new_model(lowerCamelCase__ , noise=lowerCamelCase__ )
self.assert_outputs_same(lowerCamelCase__ , lowerCamelCase__ )
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.''' )
def snake_case__ ( self ):
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def snake_case__ ( self ):
pass
@slow
def snake_case__ ( self ):
_lowerCamelCase = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(lowerCamelCase__ )
def lowerCAmelCase_( ) -> List[Any]:
_lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case__ ( self ):
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def snake_case__ ( self ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
_lowerCamelCase = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' )
_lowerCamelCase = self.default_image_processor
_lowerCamelCase = prepare_img()
_lowerCamelCase = image_processor(images=lowerCamelCase__ , return_tensors='''tf''' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
_lowerCamelCase = ViTMAEConfig()
_lowerCamelCase = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(1, num_patches) )
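# Sanity check (added; assumes the default ViTMAEConfig values image_size = 224
# and patch_size = 16): num_patches = (224 // 16) ** 2 = 196, which matches the
# [1, 196, 768] logits shape asserted below.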
# forward pass
_lowerCamelCase = model(**lowerCamelCase__ , noise=lowerCamelCase__ )
# verify the logits
_lowerCamelCase = tf.convert_to_tensor([1, 1_9_6, 7_6_8] )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
_lowerCamelCase = tf.convert_to_tensor(
[[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
| 623 | 1 |
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
__SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
class lowerCamelCase_( A__ ):
'''simple docstring'''
def __init__( self , lowerCamelCase__=None , **lowerCamelCase__ ):
warnings.warn(
'''`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '''
'''instead.''' , lowerCamelCase__ , )
super().__init__(args=lowerCamelCase__ , **lowerCamelCase__ )
| 623 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def lowerCAmelCase_( lowercase_ : str = "laptop" ) -> DataFrame:
_lowerCamelCase = F"""https://www.amazon.in/laptop/s?k={product}"""
_lowerCamelCase = {
'''User-Agent''': '''Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
(KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36''',
'''Accept-Language''': '''en-US, en;q=0.5''',
}
_lowerCamelCase = BeautifulSoup(requests.get(lowercase_ , headers=lowercase_ ).text )
# Initialize a Pandas dataframe with the column titles
_lowerCamelCase = DataFrame(
columns=[
'''Product Title''',
'''Product Link''',
'''Current Price of the product''',
'''Product Rating''',
'''MRP of the product''',
'''Discount''',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'''div''' , attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''} , ) , soup.find_all('''div''' , attrs={'''class''': '''a-row a-size-base a-color-base'''} ) , ):
try:
_lowerCamelCase = item.h2.text
_lowerCamelCase = '''https://www.amazon.in/''' + item.h2.a['''href''']
_lowerCamelCase = item.find('''span''' , attrs={'''class''': '''a-offscreen'''} ).text
try:
_lowerCamelCase = item.find('''span''' , attrs={'''class''': '''a-icon-alt'''} ).text
except AttributeError:
_lowerCamelCase = '''Not available'''
try:
_lowerCamelCase = (
'''₹'''
+ item.find(
'''span''' , attrs={'''class''': '''a-price a-text-price'''} ).text.split('''₹''' )[1]
)
except AttributeError:
_lowerCamelCase = ''''''
try:
_lowerCamelCase = float(
(
(
float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
- float(product_price.strip('''₹''' ).replace(''',''' , '''''' ) )
)
/ float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
)
* 1_00 )
except ValueError:
_lowerCamelCase = float('''nan''' )
except AttributeError:
pass
_lowerCamelCase = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
_lowerCamelCase = ''' '''
_lowerCamelCase = ''' '''
data_frame.index += 1
return data_frame
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[int] = '''headphones'''
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 623 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__SCREAMING_SNAKE_CASE : Tuple = {
'''configuration_transfo_xl''': ['''TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TransfoXLConfig'''],
'''tokenization_transfo_xl''': ['''TransfoXLCorpus''', '''TransfoXLTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [
'''TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AdaptiveEmbedding''',
'''TransfoXLForSequenceClassification''',
'''TransfoXLLMHeadModel''',
'''TransfoXLModel''',
'''TransfoXLPreTrainedModel''',
'''load_tf_weights_in_transfo_xl''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : int = [
'''TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFAdaptiveEmbedding''',
'''TFTransfoXLForSequenceClassification''',
'''TFTransfoXLLMHeadModel''',
'''TFTransfoXLMainLayer''',
'''TFTransfoXLModel''',
'''TFTransfoXLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 623 |
"""simple docstring"""
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class lowerCamelCase_( A__ ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=1_0_2_4 , lowerCamelCase__=1_0_2_4 , lowerCamelCase__=3.6 ):
_lowerCamelCase = tokenizer
_lowerCamelCase = tokenizer.bos_token_id
_lowerCamelCase = dataset
_lowerCamelCase = seq_length
_lowerCamelCase = seq_length * chars_per_token * num_of_sequences
def __iter__( self ):
_lowerCamelCase = iter(self.dataset )
_lowerCamelCase = True
while more_examples:
_lowerCamelCase , _lowerCamelCase = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(lowerCamelCase__ )['''content'''] )
buffer_len += len(buffer[-1] )
except StopIteration:
_lowerCamelCase = False
break
_lowerCamelCase = tokenizer(lowerCamelCase__ , truncation=lowerCamelCase__ )['''input_ids''']
_lowerCamelCase = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0 , len(lowerCamelCase__ ) , self.seq_length ):
_lowerCamelCase = all_token_ids[i : i + self.seq_length]
if len(lowerCamelCase__ ) == self.seq_length:
yield torch.tensor(lowerCamelCase__ )
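# Worked example (added): with seq_length = 4 and tokenized inputs [1, 2, 3]
# and [4, 5] joined by the concat (BOS) token B, the stream is
# [1, 2, 3, B, 4, 5, B]; the first chunk [1, 2, 3, B] is yielded and the
# trailing [4, 5, B] is dropped because it is shorter than seq_length.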
def lowerCAmelCase_( lowercase_ : Any ) -> Optional[Any]:
_lowerCamelCase = {'''streaming''': True}
_lowerCamelCase = load_dataset(args.dataset_name , split='''train''' , **lowercase_ )
_lowerCamelCase = ConstantLengthDataset(lowercase_ , lowercase_ , seq_length=args.seq_length )
_lowerCamelCase = DataLoader(lowercase_ , batch_size=args.batch_size )
return eval_dataloader
def lowerCAmelCase_( lowercase_ : Tuple ) -> str:
model.eval()
_lowerCamelCase = []
for step, batch in enumerate(lowercase_ ):
with torch.no_grad():
_lowerCamelCase = model(lowercase_ , labels=lowercase_ )
_lowerCamelCase = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(lowercase_ ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
_lowerCamelCase = torch.mean(torch.cat(lowercase_ ) )
try:
_lowerCamelCase = torch.exp(lowercase_ )
except OverflowError:
_lowerCamelCase = float('''inf''' )
return loss.item(), perplexity.item()
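# Worked example (added): perplexity is exp(mean eval loss), so a mean loss of
# 2.0 corresponds to a perplexity of e**2 ≈ 7.39; the OverflowError fallback
# above maps very large losses to infinity.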
# Setup Accelerator
__SCREAMING_SNAKE_CASE : Dict = Accelerator()
# Parse configuration
__SCREAMING_SNAKE_CASE : Tuple = HfArgumentParser(EvaluationArguments)
__SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
set_seed(args.seed)
# Logging
__SCREAMING_SNAKE_CASE : Optional[Any] = logging.getLogger(__name__)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
# Load model and tokenizer
__SCREAMING_SNAKE_CASE : str = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
__SCREAMING_SNAKE_CASE : Union[str, Any] = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
__SCREAMING_SNAKE_CASE : str = create_dataloader(args)
# Prepare everything with our `accelerator`.
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('''Evaluating and saving model after training''')
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = evaluate(args)
logger.info(F"""loss/eval: {eval_loss}, perplexity: {perplexity}""")
| 623 | 1 |
"""simple docstring"""
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def lowerCAmelCase_( ) -> str:
_lowerCamelCase = argparse.ArgumentParser()
parser.add_argument('''--model_ckpt''' , type=lowercase_ , default='''microsoft/unixcoder-base-nine''' )
parser.add_argument('''--num_epochs''' , type=lowercase_ , default=5 )
parser.add_argument('''--batch_size''' , type=lowercase_ , default=6 )
parser.add_argument('''--gradient_accumulation_steps''' , type=lowercase_ , default=1 )
parser.add_argument('''--freeze''' , type=lowercase_ , default=lowercase_ )
parser.add_argument('''--learning_rate''' , type=lowercase_ , default=5e-4 )
parser.add_argument('''--seed''' , type=lowercase_ , default=0 )
parser.add_argument('''--lr_scheduler_type''' , type=lowercase_ , default='''cosine''' )
parser.add_argument('''--num_warmup_steps''' , type=lowercase_ , default=10 )
parser.add_argument('''--weight_decay''' , type=lowercase_ , default=0.0_1 )
parser.add_argument('''--output_dir''' , type=lowercase_ , default='''./results''' )
return parser.parse_args()
__SCREAMING_SNAKE_CASE : Any = load('''accuracy''')
def lowerCAmelCase_( lowercase_ : str ) -> str:
_lowerCamelCase , _lowerCamelCase = eval_pred
_lowerCamelCase = np.argmax(lowercase_ , axis=1 )
return metric.compute(predictions=lowercase_ , references=lowercase_ )
class lowerCamelCase_( A__ ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ ):
super().__init__()
_lowerCamelCase = trainer
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ):
if control.should_evaluate:
_lowerCamelCase = deepcopy(lowerCamelCase__ )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix='''train''' )
return control_copy
def lowerCAmelCase_( ) -> Any:
_lowerCamelCase = get_args()
set_seed(args.seed )
_lowerCamelCase = load_dataset('''codeparrot/codecomplex''' , split='''train''' )
_lowerCamelCase = dataset.train_test_split(test_size=0.2 )
_lowerCamelCase = train_test['''test'''].train_test_split(test_size=0.5 )
_lowerCamelCase = DatasetDict(
{
'''train''': train_test['''train'''],
'''test''': test_validation['''train'''],
'''valid''': test_validation['''test'''],
} )
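# The two-stage split above yields an 80/10/10 partition: test_size=0.2 carves
# off 20% of the data, and splitting that 20% in half (test_size=0.5) leaves
# 10% for test and 10% for validation.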
print('''Loading tokenizer and model''' )
_lowerCamelCase = AutoTokenizer.from_pretrained(args.model_ckpt )
_lowerCamelCase = tokenizer.eos_token
_lowerCamelCase = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
_lowerCamelCase = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
_lowerCamelCase = False
_lowerCamelCase = ClassLabel(num_classes=7 , names=list(set(train_test_validation['''train''']['''complexity'''] ) ) )
def tokenize(lowercase_ : Union[str, Any] ):
_lowerCamelCase = tokenizer(example['''src'''] , truncation=lowercase_ , max_length=10_24 )
_lowerCamelCase = labels.straint(example['''complexity'''] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
_lowerCamelCase = train_test_validation.map(
lowercase_ , batched=lowercase_ , remove_columns=train_test_validation['''train'''].column_names , )
_lowerCamelCase = DataCollatorWithPadding(tokenizer=lowercase_ )
_lowerCamelCase = TrainingArguments(
output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy='''epoch''' , save_strategy='''epoch''' , logging_strategy='''epoch''' , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.0_1 , metric_for_best_model='''accuracy''' , run_name='''complexity-java''' , report_to='''wandb''' , )
_lowerCamelCase = Trainer(
model=lowercase_ , args=lowercase_ , train_dataset=tokenized_datasets['''train'''] , eval_dataset=tokenized_datasets['''valid'''] , tokenizer=lowercase_ , data_collator=lowercase_ , compute_metrics=lowercase_ , )
print('''Training...''' )
trainer.add_callback(CustomCallback(lowercase_ ) )
trainer.train()
if __name__ == "__main__":
main()
| 623 |
"""simple docstring"""
import numpy as np
def lowerCAmelCase_( lowercase_ : np.ndarray , lowercase_ : np.ndarray , lowercase_ : float = 1e-12 , lowercase_ : int = 1_00 , ) -> tuple[float, np.ndarray]:
assert np.shape(lowercase_ )[0] == np.shape(lowercase_ )[1]
# Ensure proper dimensionality.
assert np.shape(lowercase_ )[0] == np.shape(lowercase_ )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(lowercase_ ) == np.iscomplexobj(lowercase_ )
_lowerCamelCase = np.iscomplexobj(lowercase_ )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(lowercase_ , input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when the change from one iteration to the next is small.
_lowerCamelCase = False
_lowerCamelCase = 0
_lowerCamelCase = 0
_lowerCamelCase = 1e12
while not convergence:
# Multiply the matrix by the vector.
_lowerCamelCase = np.dot(lowercase_ , lowercase_ )
# Normalize the resulting output vector.
_lowerCamelCase = w / np.linalg.norm(lowercase_ )
# Find the Rayleigh quotient
# (faster than usual b/c we know the vector is already normalized)
_lowerCamelCase = vector.conj().T if is_complex else vector.T
_lowerCamelCase = np.dot(lowercase_ , np.dot(lowercase_ , lowercase_ ) )
# Check convergence.
_lowerCamelCase = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
_lowerCamelCase = True
_lowerCamelCase = lambda_
if is_complex:
_lowerCamelCase = np.real(lambda_ )
return lambda_, vector
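# Quick illustration: for the symmetric matrix [[2, 0], [0, 1]] and start
# vector [1, 1], the loop above converges to the dominant eigenvalue 2 with
# eigenvector approximately [1, 0].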
def lowerCAmelCase_( ) -> None:
_lowerCamelCase = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
_lowerCamelCase = np.array([41, 4, 20] )
_lowerCamelCase = real_input_matrix.astype(np.complexaaa )
_lowerCamelCase = np.triu(1j * complex_input_matrix , 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
_lowerCamelCase = np.array([41, 4, 20] ).astype(np.complexaaa )
for problem_type in ["real", "complex"]:
if problem_type == "real":
_lowerCamelCase = real_input_matrix
_lowerCamelCase = real_vector
elif problem_type == "complex":
_lowerCamelCase = complex_input_matrix
_lowerCamelCase = complex_vector
# Our implementation.
_lowerCamelCase , _lowerCamelCase = power_iteration(lowercase_ , lowercase_ )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
# eigh (eigh is used for symmetric or Hermitian matrices).
_lowerCamelCase , _lowerCamelCase = np.linalg.eigh(lowercase_ )
# Last eigenvalue is the maximum one.
_lowerCamelCase = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
_lowerCamelCase = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1e-6
# Take element-wise absolute values of each eigenvector,
# as eigenvectors are only unique up to a sign.
assert np.linalg.norm(np.abs(lowercase_ ) - np.abs(lowercase_ ) ) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 623 | 1 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
def lowerCAmelCase_( lowercase_ : list[Any] ) -> None:
create_state_space_tree(lowercase_ , [] , 0 )
def lowerCAmelCase_( lowercase_ : list[Any] , lowercase_ : list[Any] , lowercase_ : int ) -> None:
if index == len(lowercase_ ):
print(lowercase_ )
return
create_state_space_tree(lowercase_ , lowercase_ , index + 1 )
current_subsequence.append(sequence[index] )
create_state_space_tree(lowercase_ , lowercase_ , index + 1 )
current_subsequence.pop()
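# For the sequence [1, 2] the recursion above prints [], [2], [1], [1, 2]:
# at every index the element is first excluded, then included.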
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['''A''', '''B''', '''C'''])
generate_all_subsequences(seq)
| 623 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''configuration_speecht5''': [
'''SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP''',
'''SpeechT5Config''',
'''SpeechT5HifiGanConfig''',
],
'''feature_extraction_speecht5''': ['''SpeechT5FeatureExtractor'''],
'''processing_speecht5''': ['''SpeechT5Processor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Tuple = ['''SpeechT5Tokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Any = [
'''SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SpeechT5ForSpeechToText''',
'''SpeechT5ForSpeechToSpeech''',
'''SpeechT5ForTextToSpeech''',
'''SpeechT5Model''',
'''SpeechT5PreTrainedModel''',
'''SpeechT5HifiGan''',
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 623 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
__SCREAMING_SNAKE_CASE : Optional[Any] = TypeVar('''T''')
__SCREAMING_SNAKE_CASE : List[str] = TypeVar('''U''')
class lowerCamelCase_( Generic[T, U] ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = key
_lowerCamelCase = val
_lowerCamelCase = None
_lowerCamelCase = None
def __repr__( self ):
return (
F"""Node: key: {self.key}, val: {self.val}, """
F"""has next: {bool(self.next )}, has prev: {bool(self.prev )}"""
)
class lowerCamelCase_( Generic[T, U] ):
'''simple docstring'''
def __init__( self ):
_lowerCamelCase = DoubleLinkedListNode(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = DoubleLinkedListNode(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase , _lowerCamelCase = self.rear, self.head
def __repr__( self ):
_lowerCamelCase = ['''DoubleLinkedList''']
_lowerCamelCase = self.head
while node.next is not None:
rep.append(str(lowerCamelCase__ ) )
_lowerCamelCase = node.next
rep.append(str(self.rear ) )
return ",\n ".join(lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
_lowerCamelCase = node
_lowerCamelCase = previous
_lowerCamelCase = node
_lowerCamelCase = self.rear
def snake_case__ ( self , lowerCamelCase__ ):
if node.prev is None or node.next is None:
return None
_lowerCamelCase = node.next
_lowerCamelCase = node.prev
_lowerCamelCase = None
_lowerCamelCase = None
return node
class lowerCamelCase_( Generic[T, U] ):
'''simple docstring'''
lowercase__ : dict[Callable[[T], U], LRUCache[T, U]] = {}
def __init__( self , lowerCamelCase__ ):
_lowerCamelCase = DoubleLinkedList()
_lowerCamelCase = capacity
_lowerCamelCase = 0
_lowerCamelCase = 0
_lowerCamelCase = 0
_lowerCamelCase = {}
def __repr__( self ):
return (
F"""CacheInfo(hits={self.hits}, misses={self.miss}, """
F"""capacity={self.capacity}, current size={self.num_keys})"""
)
def __contains__( self , lowerCamelCase__ ):
return key in self.cache
def snake_case__ ( self , lowerCamelCase__ ):
# Note: a more Pythonic interface would raise KeyError rather than return None
if key in self.cache:
self.hits += 1
_lowerCamelCase = self.cache[key]
_lowerCamelCase = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(lowerCamelCase__ )
return node.val
self.miss += 1
return None
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ):
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
_lowerCamelCase = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(lowerCamelCase__ ) is not None
) # first_node is guaranteed to be in the list
del self.cache[first_node.key]
self.num_keys -= 1
_lowerCamelCase = DoubleLinkedListNode(lowerCamelCase__ , lowerCamelCase__ )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
_lowerCamelCase = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
_lowerCamelCase = value
self.list.add(lowerCamelCase__ )
@classmethod
def snake_case__ ( cls , lowerCamelCase__ = 1_2_8 ):
def cache_decorator_inner(lowerCamelCase__ ) -> Callable[..., U]:
def cache_decorator_wrapper(*lowerCamelCase__ ) -> U:
if func not in cls.decorator_function_to_instance_map:
_lowerCamelCase = LRUCache(lowerCamelCase__ )
_lowerCamelCase = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
_lowerCamelCase = func(*lowerCamelCase__ )
cls.decorator_function_to_instance_map[func].put(args[0] , lowerCamelCase__ )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(lowerCamelCase__ , '''cache_info''' , lowerCamelCase__ ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
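# Hypothetical usage sketch of the classmethod decorator above (the readable
# name `decorator` is an assumption; here it appears as `snake_case__`):
# @LRUCache.decorator(100)
# def fib(num):
# ...
# Results are cached per function, keyed only on the first positional argument.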
if __name__ == "__main__":
import doctest
doctest.testmod()
| 623 |
"""simple docstring"""
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
__SCREAMING_SNAKE_CASE : List[str] = numpy.array([0, 0])
__SCREAMING_SNAKE_CASE : Optional[Any] = numpy.array([0.5, 0.866_0254])
__SCREAMING_SNAKE_CASE : Tuple = numpy.array([1, 0])
__SCREAMING_SNAKE_CASE : List[Any] = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def lowerCAmelCase_( lowercase_ : list[numpy.ndarray] , lowercase_ : int ) -> list[numpy.ndarray]:
_lowerCamelCase = initial_vectors
for _ in range(lowercase_ ):
_lowerCamelCase = iteration_step(lowercase_ )
return vectors
def lowerCAmelCase_( lowercase_ : list[numpy.ndarray] ) -> list[numpy.ndarray]:
_lowerCamelCase = []
for i, start_vector in enumerate(vectors[:-1] ):
_lowerCamelCase = vectors[i + 1]
new_vectors.append(lowercase_ )
_lowerCamelCase = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def lowerCAmelCase_( lowercase_ : numpy.ndarray , lowercase_ : float ) -> numpy.ndarray:
_lowerCamelCase = numpy.radians(lowercase_ )
_lowerCamelCase , _lowerCamelCase = numpy.cos(lowercase_ ), numpy.sin(lowercase_ )
_lowerCamelCase = numpy.array(((c, -s), (s, c)) )
return numpy.dot(lowercase_ , lowercase_ )
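# Example: rotate(numpy.array([1, 0]), 90) returns approximately [0, 1], i.e.
# the standard 2D rotation matrix [[cos t, -sin t], [sin t, cos t]] applied to
# the input vector.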
def lowerCAmelCase_( lowercase_ : list[numpy.ndarray] ) -> None:
_lowerCamelCase = plt.gca()
axes.set_aspect('''equal''' )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
_lowerCamelCase , _lowerCamelCase = zip(*lowercase_ )
plt.plot(lowercase_ , lowercase_ )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
__SCREAMING_SNAKE_CASE : str = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 623 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
def lowerCAmelCase_( lowercase_ : Any , lowercase_ : Optional[Any]=False ) -> Tuple:
_lowerCamelCase = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''deit.embeddings.cls_token'''),
('''dist_token''', '''deit.embeddings.distillation_token'''),
('''patch_embed.proj.weight''', '''deit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''deit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''deit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
_lowerCamelCase = [(pair[0], pair[1][4:]) if pair[1].startswith('''deit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('''norm.weight''', '''deit.layernorm.weight'''),
('''norm.bias''', '''deit.layernorm.bias'''),
('''head.weight''', '''cls_classifier.weight'''),
('''head.bias''', '''cls_classifier.bias'''),
('''head_dist.weight''', '''distillation_classifier.weight'''),
('''head_dist.bias''', '''distillation_classifier.bias'''),
] )
return rename_keys
def lowerCAmelCase_( lowercase_ : Tuple , lowercase_ : Optional[Any] , lowercase_ : Any=False ) -> str:
for i in range(config.num_hidden_layers ):
if base_model:
_lowerCamelCase = ''''''
else:
_lowerCamelCase = '''deit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCamelCase = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
_lowerCamelCase = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase = in_proj_weight[
: config.hidden_size, :
]
_lowerCamelCase = in_proj_bias[: config.hidden_size]
_lowerCamelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCamelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCamelCase = in_proj_weight[
-config.hidden_size :, :
]
_lowerCamelCase = in_proj_bias[-config.hidden_size :]
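# Illustrative shapes for the base model (hidden_size 768): timm stores the
# fused qkv weight as (2304, 768), and the three slices above are each
# (768, 768); the (2304,) bias splits into three (768,) chunks.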
def lowerCAmelCase_( lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : str ) -> Tuple:
_lowerCamelCase = dct.pop(lowercase_ )
_lowerCamelCase = val
def lowerCAmelCase_( ) -> Tuple:
_lowerCamelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_lowerCamelCase = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw )
return im
@torch.no_grad()
def lowerCAmelCase_( lowercase_ : Optional[int] , lowercase_ : Optional[Any] ) -> List[str]:
_lowerCamelCase = DeiTConfig()
# all deit models have fine-tuned heads
_lowerCamelCase = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
_lowerCamelCase = 10_00
_lowerCamelCase = '''huggingface/label-files'''
_lowerCamelCase = '''imagenet-1k-id2label.json'''
_lowerCamelCase = json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type='''dataset''' ) , '''r''' ) )
_lowerCamelCase = {int(lowercase_ ): v for k, v in idalabel.items()}
_lowerCamelCase = idalabel
_lowerCamelCase = {v: k for k, v in idalabel.items()}
_lowerCamelCase = int(deit_name[-6:-4] )
_lowerCamelCase = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith('''tiny''' ):
_lowerCamelCase = 1_92
_lowerCamelCase = 7_68
_lowerCamelCase = 12
_lowerCamelCase = 3
elif deit_name[9:].startswith('''small''' ):
_lowerCamelCase = 3_84
_lowerCamelCase = 15_36
_lowerCamelCase = 12
_lowerCamelCase = 6
if deit_name[9:].startswith('''base''' ):
pass
elif deit_name[4:].startswith('''large''' ):
_lowerCamelCase = 10_24
_lowerCamelCase = 40_96
_lowerCamelCase = 24
_lowerCamelCase = 16
# load original model from timm
_lowerCamelCase = timm.create_model(lowercase_ , pretrained=lowercase_ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
_lowerCamelCase = timm_model.state_dict()
_lowerCamelCase = create_rename_keys(lowercase_ , lowercase_ )
for src, dest in rename_keys:
rename_key(lowercase_ , lowercase_ , lowercase_ )
read_in_q_k_v(lowercase_ , lowercase_ , lowercase_ )
# load HuggingFace model
_lowerCamelCase = DeiTForImageClassificationWithTeacher(lowercase_ ).eval()
model.load_state_dict(lowercase_ )
# Check outputs on an image, prepared by DeiTImageProcessor
_lowerCamelCase = int(
(2_56 / 2_24) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
_lowerCamelCase = DeiTImageProcessor(size=lowercase_ , crop_size=config.image_size )
_lowerCamelCase = image_processor(images=prepare_img() , return_tensors='''pt''' )
_lowerCamelCase = encoding['''pixel_values''']
_lowerCamelCase = model(lowercase_ )
_lowerCamelCase = timm_model(lowercase_ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowercase_ , outputs.logits , atol=1e-3 )
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
print(F"""Saving model {deit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase_ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowercase_ )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--deit_name''',
default='''vit_deit_base_distilled_patch16_224''',
type=str,
help='''Name of the DeiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 623 |
"""simple docstring"""
from typing import Any
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ ):
_lowerCamelCase = data
_lowerCamelCase = None
class lowerCamelCase_:
'''simple docstring'''
def __init__( self ):
_lowerCamelCase = None
def snake_case__ ( self ):
_lowerCamelCase = self.head
while temp is not None:
print(temp.data , end=''' ''' )
_lowerCamelCase = temp.next
print()
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = Node(lowerCamelCase__ )
_lowerCamelCase = self.head
_lowerCamelCase = new_node
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ):
if node_data_a == node_data_a:
return
else:
_lowerCamelCase = self.head
while node_a is not None and node_a.data != node_data_a:
_lowerCamelCase = node_a.next
_lowerCamelCase = self.head
while node_a is not None and node_a.data != node_data_a:
_lowerCamelCase = node_a.next
if node_a is None or node_a is None:
return
_lowerCamelCase , _lowerCamelCase = node_a.data, node_a.data
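# Note: the swap above exchanges the data payloads of the two nodes rather
# than relinking them; for the list 1 2 3 4 5 built below, swapping 1 and 4
# prints 4 2 3 1 5.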
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[Any] = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('''After swapping''')
ll.print_list()
| 623 | 1 |
"""simple docstring"""
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ = "cpu" , lowerCamelCase__ = "openai/clip-vit-large-patch14" ):
_lowerCamelCase = device
_lowerCamelCase = CLIPTokenizerFast.from_pretrained(lowerCamelCase__ )
_lowerCamelCase = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3]
_lowerCamelCase = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1]
_lowerCamelCase = torchvision.transforms.Normalize(self.image_mean , self.image_std )
_lowerCamelCase = torchvision.transforms.Resize(2_2_4 )
_lowerCamelCase = torchvision.transforms.CenterCrop(2_2_4 )
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = self.resize(lowerCamelCase__ )
_lowerCamelCase = self.center_crop(lowerCamelCase__ )
_lowerCamelCase = self.normalize(lowerCamelCase__ )
return images
def __call__( self , lowerCamelCase__=None , lowerCamelCase__=None , **lowerCamelCase__ ):
_lowerCamelCase = self.tokenizer(text=lowerCamelCase__ , **lowerCamelCase__ )
_lowerCamelCase = self.preprocess_img(lowerCamelCase__ )
_lowerCamelCase = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class lowerCamelCase_( nn.Module ):
'''simple docstring'''
def __init__( self , lowerCamelCase__=1_0 , lowerCamelCase__=0.0_1 , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=False , lowerCamelCase__=True , lowerCamelCase__="image" , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=False , lowerCamelCase__=False , ):
super().__init__()
_lowerCamelCase = None
_lowerCamelCase = device if device else get_device()
if vqgan:
_lowerCamelCase = vqgan
else:
_lowerCamelCase = load_vqgan(self.device , conf_path=lowerCamelCase__ , ckpt_path=lowerCamelCase__ )
self.vqgan.eval()
if clip:
_lowerCamelCase = clip
else:
_lowerCamelCase = CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''' )
self.clip.to(self.device )
_lowerCamelCase = ProcessorGradientFlow(device=self.device )
_lowerCamelCase = iterations
_lowerCamelCase = lr
_lowerCamelCase = log
_lowerCamelCase = make_grid
_lowerCamelCase = return_val
_lowerCamelCase = quantize
_lowerCamelCase = self.vqgan.decoder.z_shape
def snake_case__ ( self , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=5 , lowerCamelCase__=True ):
_lowerCamelCase = []
if output_path is None:
_lowerCamelCase = '''./animation.gif'''
if input_path is None:
_lowerCamelCase = self.save_path
_lowerCamelCase = sorted(glob(input_path + '''/*''' ) )
if not len(lowerCamelCase__ ):
raise ValueError(
'''No images found in save path, aborting (did you pass save_intermediate=True to the generate'''
''' function?)''' )
if len(lowerCamelCase__ ) == 1:
print('''Only one image found in save path (did you pass save_intermediate=True to the generate function?)''' )
_lowerCamelCase = total_duration / len(lowerCamelCase__ )
_lowerCamelCase = [frame_duration] * len(lowerCamelCase__ )
if extend_frames:
_lowerCamelCase = 1.5
_lowerCamelCase = 3
for file_name in paths:
if file_name.endswith('''.png''' ):
images.append(imageio.imread(lowerCamelCase__ ) )
imageio.mimsave(lowerCamelCase__ , lowerCamelCase__ , duration=lowerCamelCase__ )
print(F"""gif saved to {output_path}""" )
def snake_case__ ( self , lowerCamelCase__=None , lowerCamelCase__=None ):
if not (path or img):
raise ValueError('''Input either path or tensor''' )
if img is not None:
raise NotImplementedError
_lowerCamelCase = preprocess(Image.open(lowerCamelCase__ ) , target_image_size=2_5_6 ).to(self.device )
_lowerCamelCase = preprocess_vqgan(lowerCamelCase__ )
_lowerCamelCase , *_lowerCamelCase = self.vqgan.encode(lowerCamelCase__ )
return z
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = self.latent.detach().requires_grad_()
_lowerCamelCase = base_latent + transform_vector
if self.quantize:
_lowerCamelCase , *_lowerCamelCase = self.vqgan.quantize(lowerCamelCase__ )
else:
_lowerCamelCase = trans_latent
return self.vqgan.decode(lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None ):
_lowerCamelCase = self.clip_preprocessor(text=lowerCamelCase__ , images=lowerCamelCase__ , return_tensors='''pt''' , padding=lowerCamelCase__ )
_lowerCamelCase = self.clip(**lowerCamelCase__ )
_lowerCamelCase = clip_outputs.logits_per_image
if weights is not None:
_lowerCamelCase = similarity_logits * weights
return similarity_logits.sum()
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = self._get_clip_similarity(pos_prompts['''prompts'''] , lowerCamelCase__ , weights=(1 / pos_prompts['''weights''']) )
if neg_prompts:
_lowerCamelCase = self._get_clip_similarity(neg_prompts['''prompts'''] , lowerCamelCase__ , weights=neg_prompts['''weights'''] )
else:
_lowerCamelCase = torch.tensor([1] , device=self.device )
_lowerCamelCase = -torch.log(lowerCamelCase__ ) + torch.log(lowerCamelCase__ )
return loss
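# The loss above is -log(pos_similarity) + log(neg_similarity): minimizing it
# pulls the image toward the positive prompts and away from the negative ones
# (with no negative prompts the second term is log(1) = 0).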
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = torch.randn_like(self.latent , requires_grad=lowerCamelCase__ , device=self.device )
_lowerCamelCase = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
_lowerCamelCase = self._add_vector(lowerCamelCase__ )
_lowerCamelCase = loop_post_process(lowerCamelCase__ )
_lowerCamelCase = self._get_CLIP_loss(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
print('''CLIP loss''' , lowerCamelCase__ )
if self.log:
wandb.log({'''CLIP Loss''': clip_loss} )
clip_loss.backward(retain_graph=lowerCamelCase__ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
wandb.init(reinit=lowerCamelCase__ , project='''face-editor''' )
wandb.config.update({'''Positive Prompts''': positive_prompts} )
wandb.config.update({'''Negative Prompts''': negative_prompts} )
wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations} )
if image_path:
_lowerCamelCase = Image.open(lowerCamelCase__ )
_lowerCamelCase = image.resize((2_5_6, 2_5_6) )
wandb.log('''Original Image''' , wandb.Image(lowerCamelCase__ ) )
def snake_case__ ( self , lowerCamelCase__ ):
if not prompts:
return []
_lowerCamelCase = []
_lowerCamelCase = []
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = [prompt.strip() for prompt in prompts.split('''|''' )]
for prompt in prompts:
if isinstance(lowerCamelCase__ , (tuple, list) ):
_lowerCamelCase = prompt[0]
_lowerCamelCase = float(prompt[1] )
elif ":" in prompt:
_lowerCamelCase , _lowerCamelCase = prompt.split(''':''' )
_lowerCamelCase = float(lowerCamelCase__ )
else:
_lowerCamelCase = prompt
_lowerCamelCase = 1.0
processed_prompts.append(lowerCamelCase__ )
weights.append(lowerCamelCase__ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(lowerCamelCase__ , device=self.device ),
}
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=None , ):
if image_path:
_lowerCamelCase = self._get_latent(lowerCamelCase__ )
else:
_lowerCamelCase = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
assert pos_prompts, "You must provide at least one positive prompt."
_lowerCamelCase = self.process_prompts(lowerCamelCase__ )
_lowerCamelCase = self.process_prompts(lowerCamelCase__ )
if save_final and save_path is None:
_lowerCamelCase = os.path.join('''./outputs/''' , '''_'''.join(pos_prompts['''prompts'''] ) )
if not os.path.exists(lowerCamelCase__ ):
os.makedirs(lowerCamelCase__ )
else:
_lowerCamelCase = save_path + '''_''' + get_timestamp()
os.makedirs(lowerCamelCase__ )
_lowerCamelCase = save_path
_lowerCamelCase = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('''Original Image''' )
show_pil(custom_to_pil(lowerCamelCase__ ) )
_lowerCamelCase = loop_post_process(lowerCamelCase__ )
for iter, transformed_img in enumerate(self._optimize_CLIP(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) ):
if show_intermediate:
show_pil(lowerCamelCase__ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , F"""iter_{iter:03d}.png""" ) )
if self.log:
wandb.log({'''Image''': wandb.Image(lowerCamelCase__ )} )
if show_final:
show_pil(lowerCamelCase__ )
if save_final:
transformed_img.save(os.path.join(self.save_path , F"""iter_{iter:03d}_final.png""" ) )
| 623 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__SCREAMING_SNAKE_CASE : Optional[Any] = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def lowerCAmelCase_( lowercase_ : List[Any] ) -> Optional[Any]:
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(lowercase_ )
def lowerCAmelCase_( lowercase_ : List[str] ) -> List[str]:
from diffusers.utils.testing_utils import pytest_terminal_summary_main
_lowerCamelCase = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(lowercase_ , id=lowercase_ )
| 623 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Any = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : Optional[Any] = 'megatron-bert'
def __init__( self , lowerCamelCase__=2_9_0_5_6 , lowerCamelCase__=1_0_2_4 , lowerCamelCase__=2_4 , lowerCamelCase__=1_6 , lowerCamelCase__=4_0_9_6 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=5_1_2 , lowerCamelCase__=2 , lowerCamelCase__=0.0_2 , lowerCamelCase__=1e-12 , lowerCamelCase__=0 , lowerCamelCase__="absolute" , lowerCamelCase__=True , **lowerCamelCase__ , ):
super().__init__(pad_token_id=lowerCamelCase__ , **lowerCamelCase__ )
_lowerCamelCase = vocab_size
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = hidden_act
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = type_vocab_size
_lowerCamelCase = initializer_range
_lowerCamelCase = layer_norm_eps
_lowerCamelCase = position_embedding_type
_lowerCamelCase = use_cache
| 623 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 623 | 1 |
"""simple docstring"""
def lowerCAmelCase_( lowercase_ : int = 1_00_00_00 ) -> int:
_lowerCamelCase = 1
_lowerCamelCase = 1
_lowerCamelCase = {1: 1}
for inputa in range(2 , lowercase_ ):
_lowerCamelCase = 0
_lowerCamelCase = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
_lowerCamelCase = (3 * number) + 1
counter += 1
if inputa not in counters:
_lowerCamelCase = counter
if counter > pre_counter:
_lowerCamelCase = inputa
_lowerCamelCase = counter
return largest_number
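# Worked example (from the Project Euler 14 statement): the chain starting at
# 13 has 10 terms: 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1.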
if __name__ == "__main__":
print(solution(int(input().strip())))
| 623 |
"""simple docstring"""
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=9_9 , lowerCamelCase__=1_3 , lowerCamelCase__=1_6 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=True , lowerCamelCase__=2 , lowerCamelCase__=3_2 , lowerCamelCase__=4 , lowerCamelCase__=4 , lowerCamelCase__=3_0 , lowerCamelCase__=0 , lowerCamelCase__=1 , lowerCamelCase__=2 , lowerCamelCase__=None , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = decoder_seq_length
# For common tests
_lowerCamelCase = self.decoder_seq_length
_lowerCamelCase = is_training
_lowerCamelCase = use_attention_mask
_lowerCamelCase = use_labels
_lowerCamelCase = vocab_size
_lowerCamelCase = d_model
_lowerCamelCase = d_model
_lowerCamelCase = decoder_layers
_lowerCamelCase = decoder_layers
_lowerCamelCase = decoder_ffn_dim
_lowerCamelCase = decoder_attention_heads
_lowerCamelCase = decoder_attention_heads
_lowerCamelCase = eos_token_id
_lowerCamelCase = bos_token_id
_lowerCamelCase = pad_token_id
_lowerCamelCase = decoder_start_token_id
_lowerCamelCase = use_cache
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = None
_lowerCamelCase = decoder_seq_length
_lowerCamelCase = 2
_lowerCamelCase = 1
def snake_case__ ( self ):
_lowerCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
_lowerCamelCase = None
if self.use_attention_mask:
_lowerCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
_lowerCamelCase = None
if self.use_labels:
_lowerCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
_lowerCamelCase = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ):
_lowerCamelCase = True
_lowerCamelCase = TrOCRDecoder(config=lowerCamelCase__ ).to(lowerCamelCase__ ).eval()
_lowerCamelCase = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
_lowerCamelCase = model(lowerCamelCase__ , use_cache=lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , use_cache=lowerCamelCase__ )
self.parent.assertTrue(len(lowerCamelCase__ ) == len(lowerCamelCase__ ) )
self.parent.assertTrue(len(lowerCamelCase__ ) == len(lowerCamelCase__ ) + 1 )
_lowerCamelCase = outputs['''past_key_values''']
# create hypothetical next token and extend to next_input_ids
_lowerCamelCase = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append next tokens to input_ids
_lowerCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
_lowerCamelCase = model(lowerCamelCase__ )['''last_hidden_state''']
_lowerCamelCase = model(lowerCamelCase__ , past_key_values=lowerCamelCase__ )['''last_hidden_state''']
# select random slice
_lowerCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowerCamelCase = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
_lowerCamelCase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 )
def snake_case__ ( self ):
_lowerCamelCase = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = config_and_inputs
_lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase_( A__, A__, A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : int = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
lowercase__ : List[str] = (TrOCRForCausalLM,) if is_torch_available() else ()
lowercase__ : Tuple = {'text-generation': TrOCRForCausalLM} if is_torch_available() else {}
lowercase__ : Dict = True
lowercase__ : Optional[Any] = False
def snake_case__ ( self ):
_lowerCamelCase = TrOCRStandaloneDecoderModelTester(self , is_training=lowerCamelCase__ )
_lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
self.config_tester.run_common_tests()
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*lowerCamelCase__ )
def snake_case__ ( self ):
return
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def snake_case__ ( self ):
pass
| 623 | 1 |
"""simple docstring"""
def lowerCAmelCase_( ) -> Optional[int]:
for n in range(1 , 1_00_00_00 ):
yield n * (n + 1) // 2
def lowerCAmelCase_( lowercase_ : int ) -> int:
_lowerCamelCase = 1
_lowerCamelCase = 2
while i * i <= n:
_lowerCamelCase = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
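# Worked example: 28 = 2**2 * 7, so the loop above gives (2 + 1) * (1 + 1) = 6
# divisors (1, 2, 4, 7, 14, 28); 28 is the first triangle number with more
# than five divisors.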
def lowerCAmelCase_( ) -> List[Any]:
return next(i for i in triangle_number_generator() if count_divisors(lowercase_ ) > 5_00 )
if __name__ == "__main__":
print(solution())
| 623 |
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
__SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
class lowerCamelCase_( A__ ):
'''simple docstring'''
def __init__( self , lowerCamelCase__=None , **lowerCamelCase__ ):
warnings.warn(
'''`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '''
'''instead.''' , lowerCamelCase__ , )
super().__init__(args=lowerCamelCase__ , **lowerCamelCase__ )
| 623 | 1 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__SCREAMING_SNAKE_CASE : Optional[int] = logging.getLogger(__name__)
@dataclass
class lowerCamelCase_:
'''simple docstring'''
lowercase__ : str = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
lowercase__ : Optional[str] = field(
default=A__, metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowercase__ : Optional[str] = field(
default='NER', metadata={'help': 'Task type to fine tune in training (e.g. NER, POS, etc)'} )
lowercase__ : Optional[str] = field(
default=A__, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
lowercase__ : bool = field(default=A__, metadata={'help': 'Set this flag to use fast tokenization.'} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
lowercase__ : Optional[str] = field(
default=A__, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
@dataclass
class lowerCamelCase_:
'''simple docstring'''
lowercase__ : str = field(
metadata={'help': 'The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'} )
lowercase__ : Optional[str] = field(
default=A__, metadata={'help': 'Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'}, )
lowercase__ : int = field(
default=128, metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
}, )
lowercase__ : bool = field(
default=A__, metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def lowerCAmelCase_( ) -> Optional[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_lowerCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
_lowerCamelCase = import_module('''tasks''' )
try:
_lowerCamelCase = getattr(lowercase_ , model_args.task_type )
_lowerCamelCase = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , lowercase_ )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
_lowerCamelCase = token_classification_task.get_labels(data_args.labels )
_lowerCamelCase = dict(enumerate(lowercase_ ) )
_lowerCamelCase = len(lowercase_ )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowerCamelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowercase_ , idalabel=lowercase_ , labelaid={label: i for i, label in enumerate(lowercase_ )} , cache_dir=model_args.cache_dir , )
_lowerCamelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
_lowerCamelCase = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=lowercase_ , cache_dir=model_args.cache_dir , )
# Get datasets
_lowerCamelCase = (
TokenClassificationDataset(
token_classification_task=lowercase_ , data_dir=data_args.data_dir , tokenizer=lowercase_ , labels=lowercase_ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
_lowerCamelCase = (
TokenClassificationDataset(
token_classification_task=lowercase_ , data_dir=data_args.data_dir , tokenizer=lowercase_ , labels=lowercase_ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(lowercase_ : np.ndarray , lowercase_ : np.ndarray ) -> Tuple[List[int], List[int]]:
_lowerCamelCase = np.argmax(lowercase_ , axis=2 )
_lowerCamelCase , _lowerCamelCase = preds.shape
_lowerCamelCase = [[] for _ in range(lowercase_ )]
_lowerCamelCase = [[] for _ in range(lowercase_ )]
for i in range(lowercase_ ):
for j in range(lowercase_ ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
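# Example: with label_map {0: "O", 1: "B-LOC"} and a row whose label_ids are
# [-100, 0, 1], the first position is skipped (-100 is
# nn.CrossEntropyLoss().ignore_index) and the aligned lists have length 2.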
def compute_metrics(lowercase_ : EvalPrediction ) -> Dict:
_lowerCamelCase , _lowerCamelCase = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(lowercase_ , lowercase_ ),
"precision": precision_score(lowercase_ , lowercase_ ),
"recall": recall_score(lowercase_ , lowercase_ ),
"f1": fa_score(lowercase_ , lowercase_ ),
}
# Data collator
_lowerCamelCase = DataCollatorWithPadding(lowercase_ , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
_lowerCamelCase = Trainer(
model=lowercase_ , args=lowercase_ , train_dataset=lowercase_ , eval_dataset=lowercase_ , compute_metrics=lowercase_ , data_collator=lowercase_ , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_lowerCamelCase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
_lowerCamelCase = trainer.evaluate()
_lowerCamelCase = os.path.join(training_args.output_dir , '''eval_results.txt''' )
if trainer.is_world_process_zero():
with open(lowercase_ , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(''' %s = %s''' , lowercase_ , lowercase_ )
writer.write('''%s = %s\n''' % (key, value) )
results.update(lowercase_ )
# Predict
if training_args.do_predict:
_lowerCamelCase = TokenClassificationDataset(
token_classification_task=lowercase_ , data_dir=data_args.data_dir , tokenizer=lowercase_ , labels=lowercase_ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = trainer.predict(lowercase_ )
_lowerCamelCase , _lowerCamelCase = align_predictions(lowercase_ , lowercase_ )
_lowerCamelCase = os.path.join(training_args.output_dir , '''test_results.txt''' )
if trainer.is_world_process_zero():
with open(lowercase_ , '''w''' ) as writer:
for key, value in metrics.items():
logger.info(''' %s = %s''' , lowercase_ , lowercase_ )
writer.write('''%s = %s\n''' % (key, value) )
# Save predictions
_lowerCamelCase = os.path.join(training_args.output_dir , '''test_predictions.txt''' )
if trainer.is_world_process_zero():
with open(lowercase_ , '''w''' ) as writer:
with open(os.path.join(data_args.data_dir , '''test.txt''' ) , '''r''' ) as f:
token_classification_task.write_predictions_to_file(lowercase_ , lowercase_ , lowercase_ )
return results
def lowerCAmelCase_( lowercase_ : Dict ) -> str:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 623 |
"""simple docstring"""
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=2 , lowerCamelCase__=5_6 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=9_9 , lowerCamelCase__=3_2 , lowerCamelCase__=2 , lowerCamelCase__=2 , lowerCamelCase__=7 , lowerCamelCase__="gelu_new" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=5_1_2 , lowerCamelCase__=1_6 , lowerCamelCase__=2 , lowerCamelCase__=0.0_2 , lowerCamelCase__=4 , lowerCamelCase__="block_sparse" , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=2 , lowerCamelCase__=3 , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = seq_length
_lowerCamelCase = is_training
_lowerCamelCase = use_attention_mask
_lowerCamelCase = use_token_type_ids
_lowerCamelCase = use_labels
_lowerCamelCase = vocab_size
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = type_vocab_size
_lowerCamelCase = type_sequence_label_size
_lowerCamelCase = initializer_range
_lowerCamelCase = num_choices
_lowerCamelCase = rescale_embeddings
_lowerCamelCase = attention_type
_lowerCamelCase = use_bias
_lowerCamelCase = block_size
_lowerCamelCase = num_random_blocks
def snake_case__ ( self ):
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase = None
if self.use_attention_mask:
_lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase = None
if self.use_token_type_ids:
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCamelCase = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def snake_case__ ( self ):
_lowerCamelCase = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = config_and_inputs
_lowerCamelCase = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''attention_mask''': attention_mask,
}
return config, inputs_dict
@require_flax
class lowerCamelCase_( FlaxModelTesterMixin, unittest.TestCase ):
'''simple docstring'''
lowercase__ : List[str] = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
lowercase__ : Any = False
lowercase__ : Optional[int] = False
def snake_case__ ( self ):
_lowerCamelCase = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self ):
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self ):
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self ):
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self ):
super().test_hidden_states_output()
@slow
def snake_case__ ( self ):
for model_class_name in self.all_model_classes:
_lowerCamelCase = model_class_name.from_pretrained('''google/bigbird-roberta-base''' )
self.assertIsNotNone(lowerCamelCase__ )
def snake_case__ ( self ):
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = model_class(lowerCamelCase__ )
@jax.jit
def model_jitted(lowerCamelCase__ , lowerCamelCase__=None , **lowerCamelCase__ ):
return model(input_ids=lowerCamelCase__ , attention_mask=lowerCamelCase__ , **lowerCamelCase__ )
with self.subTest('''JIT Enabled''' ):
_lowerCamelCase = model_jitted(**lowerCamelCase__ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
_lowerCamelCase = model_jitted(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) )
for jitted_output, output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=1e-5 , lowerCamelCase__="outputs" , lowerCamelCase__=None ):
# `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
# an effort was done to return `attention_probs` (yet to be verified).
if name.startswith('''outputs.attentions''' ):
return
else:
super().check_pt_flax_outputs(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
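# Hedged usage sketch (not part of the test class above): a jitted forward pass
# with the same public checkpoint the slow tests load; all calls are standard
# transformers/JAX APIs and the input sentence is made up.
import jax
from transformers import AutoTokenizer, FlaxBigBirdModel

tokenizer = AutoTokenizer.from_pretrained("google/bigbird-roberta-base")
model = FlaxBigBirdModel.from_pretrained("google/bigbird-roberta-base")
inputs = tokenizer("Block-sparse attention keeps long-input cost manageable.", return_tensors="np")

@jax.jit
def forward(input_ids, attention_mask):
    return model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state

print(forward(inputs["input_ids"], inputs["attention_mask"]).shape)  # (1, seq_len, hidden_size)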
| 623 | 1 |
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class lowerCamelCase_( TokenizerTesterMixin, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Optional[int] = BarthezTokenizer
lowercase__ : List[str] = BarthezTokenizerFast
lowercase__ : int = True
lowercase__ : List[str] = True
def snake_case__ ( self ):
super().setUp()
_lowerCamelCase = BarthezTokenizerFast.from_pretrained('''moussaKam/mbarthez''' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=lowerCamelCase__ )
_lowerCamelCase = tokenizer
def snake_case__ ( self ):
_lowerCamelCase = '''<pad>'''
_lowerCamelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ) , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(lowerCamelCase__ ) , 1_0_1_1_2_2 )
def snake_case__ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_1_1_2_2 )
@require_torch
def snake_case__ ( self ):
_lowerCamelCase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
_lowerCamelCase = [0, 5_7, 3_0_1_8, 7_0_3_0_7, 9_1, 2]
_lowerCamelCase = self.tokenizer(
lowerCamelCase__ , max_length=len(lowerCamelCase__ ) , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , return_tensors='''pt''' )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
_lowerCamelCase = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
if not self.test_rust_tokenizer:
return
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_rust_tokenizer()
_lowerCamelCase = '''I was born in 92000, and this is falsé.'''
_lowerCamelCase = tokenizer.tokenize(lowerCamelCase__ )
_lowerCamelCase = rust_tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
_lowerCamelCase = rust_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = self.get_rust_tokenizer()
_lowerCamelCase = tokenizer.encode(lowerCamelCase__ )
_lowerCamelCase = rust_tokenizer.encode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
@slow
def snake_case__ ( self ):
# fmt: off
_lowerCamelCase = {'''input_ids''': [[0, 4_9_0, 1_4_3_2_8, 4_5_0_7, 3_5_4, 4_7, 4_3_6_6_9, 9_5, 2_5, 7_8_1_1_7, 2_0_2_1_5, 1_9_7_7_9, 1_9_0, 2_2, 4_0_0, 4, 3_5_3_4_3, 8_0_3_1_0, 6_0_3, 8_6, 2_4_9_3_7, 1_0_5, 3_3_4_3_8, 9_4_7_6_2, 1_9_6, 3_9_6_4_2, 7, 1_5, 1_5_9_3_3, 1_7_3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0_5_3_4, 8_7, 2_5, 6_6, 3_3_5_8, 1_9_6, 5_5_2_8_9, 8, 8_2_9_6_1, 8_1, 2_2_0_4, 7_5_2_0_3, 7, 1_5, 7_6_3, 1_2_9_5_6, 2_1_6, 1_7_8, 1_4_3_2_8, 9_5_9_5, 1_3_7_7, 6_9_6_9_3, 7, 4_4_8, 7_1_0_2_1, 1_9_6, 1_8_1_0_6, 1_4_3_7, 1_3_9_7_4, 1_0_8, 9_0_8_3, 4, 4_9_3_1_5, 7, 3_9, 8_6, 1_3_2_6, 2_7_9_3, 4_6_3_3_3, 4, 4_4_8, 1_9_6, 7_4_5_8_8, 7, 4_9_3_1_5, 7, 3_9, 2_1, 8_2_2, 3_8_4_7_0, 7_4, 2_1, 6_6_7_2_3, 6_2_4_8_0, 8, 2_2_0_5_0, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
_lowerCamelCase = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase__ , model_name='''moussaKam/mbarthez''' , revision='''c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6''' , sequences=lowerCamelCase__ , )
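# Hedged usage sketch: encoding one of the sample sentences from the batch test
# above with the same checkpoint; the expected ids come from that test.
from transformers import BarthezTokenizerFast

tok = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
enc = tok(["A long paragraph for summarization."], padding=True, return_tensors="pt")
print(enc.input_ids.tolist()[0])  # [0, 57, 3018, 70307, 91, 2] per the test expectations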
| 623 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase_( PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Tuple = StableDiffusionXLImgaImgPipeline
lowercase__ : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
lowercase__ : int = PipelineTesterMixin.required_optional_params - {'latents'}
lowercase__ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowercase__ : Dict = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowercase__ : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def snake_case__ ( self ):
torch.manual_seed(0 )
_lowerCamelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=lowerCamelCase__ , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=8_0 , cross_attention_dim=6_4 , )
_lowerCamelCase = EulerDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , )
torch.manual_seed(0 )
_lowerCamelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
_lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=3_2 , )
_lowerCamelCase = CLIPTextModel(lowerCamelCase__ )
_lowerCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=lowerCamelCase__ )
_lowerCamelCase = CLIPTextModelWithProjection(lowerCamelCase__ )
_lowerCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=lowerCamelCase__ )
_lowerCamelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''text_encoder_2''': text_encoder_a,
'''tokenizer_2''': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__=0 ):
_lowerCamelCase = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
_lowerCamelCase = image / 2 + 0.5
if str(lowerCamelCase__ ).startswith('''mps''' ):
_lowerCamelCase = torch.manual_seed(lowerCamelCase__ )
else:
_lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
_lowerCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 5.0,
'''output_type''': '''numpy''',
'''strength''': 0.7_5,
}
return inputs
def snake_case__ ( self ):
_lowerCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_lowerCamelCase = self.get_dummy_components()
_lowerCamelCase = StableDiffusionXLImgaImgPipeline(**lowerCamelCase__ )
_lowerCamelCase = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_lowerCamelCase = self.get_dummy_inputs(lowerCamelCase__ )
_lowerCamelCase = sd_pipe(**lowerCamelCase__ ).images
_lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
_lowerCamelCase = np.array([0.4_6_5_6, 0.4_8_4_0, 0.4_4_3_9, 0.6_6_9_8, 0.5_5_7_4, 0.4_5_2_4, 0.5_7_9_9, 0.5_9_4_3, 0.5_1_6_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case__ ( self ):
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def snake_case__ ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
_lowerCamelCase = self.get_dummy_components()
_lowerCamelCase = StableDiffusionXLImgaImgPipeline(**lowerCamelCase__ )
_lowerCamelCase = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
# forward without prompt embeds
_lowerCamelCase = self.get_dummy_inputs(lowerCamelCase__ )
_lowerCamelCase = 3 * ['''this is a negative prompt''']
_lowerCamelCase = negative_prompt
_lowerCamelCase = 3 * [inputs['''prompt''']]
_lowerCamelCase = sd_pipe(**lowerCamelCase__ )
image_slice_a = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
_lowerCamelCase = self.get_dummy_inputs(lowerCamelCase__ )
_lowerCamelCase = 3 * ['''this is a negative prompt''']
_lowerCamelCase = 3 * [inputs.pop('''prompt''' )]
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = sd_pipe.encode_prompt(lowerCamelCase__ , negative_prompt=lowerCamelCase__ )
_lowerCamelCase = sd_pipe(
**lowerCamelCase__ , prompt_embeds=lowerCamelCase__ , negative_prompt_embeds=lowerCamelCase__ , pooled_prompt_embeds=lowerCamelCase__ , negative_pooled_prompt_embeds=lowerCamelCase__ , )
image_slice_b = output.images[0, -3:, -3:, -1]
# make sure that it's equal: the prompt-embeds run must match the plain-prompt run
assert np.abs(image_slice_a.flatten() - image_slice_b.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__="cpu" , lowerCamelCase__=torch.floataa , lowerCamelCase__=0 ):
_lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
_lowerCamelCase = np.random.RandomState(lowerCamelCase__ ).standard_normal((1, 4, 6_4, 6_4) )
_lowerCamelCase = torch.from_numpy(lowerCamelCase__ ).to(device=lowerCamelCase__ , dtype=lowerCamelCase__ )
_lowerCamelCase = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def snake_case__ ( self ):
_lowerCamelCase = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''' )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_lowerCamelCase = self.get_inputs(lowerCamelCase__ )
_lowerCamelCase = pipe(**lowerCamelCase__ ).images
_lowerCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_lowerCamelCase = np.array([0.4_9_4_9_3, 0.4_7_8_9_6, 0.4_0_7_9_8, 0.5_4_2_1_4, 0.5_3_2_1_2, 0.4_8_2_0_2, 0.4_7_6_5_6, 0.4_6_3_2_9, 0.4_8_5_0_6] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
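# Hedged usage sketch of the pipeline exercised above, written against the public
# diffusers API; the checkpoint name and input URL are illustrative, and the
# prompt/strength/guidance values mirror the dummy inputs in the fast test.
import torch
from diffusers import StableDiffusionXLImg2ImgPipeline
from diffusers.utils import load_image

pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16
).to("cuda")

init_image = load_image("https://example.com/input.png")  # placeholder URL
image = pipe(
    prompt="A painting of a squirrel eating a burger",
    image=init_image,
    strength=0.75,
    guidance_scale=5.0,
    num_inference_steps=25,
).images[0]
image.save("out.png")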
| 623 | 1 |
"""simple docstring"""
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)
        next_generation.append(next_generation_row)
    return next_generation
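# One-step sanity check (illustrative): a vertical blinker flips to a horizontal
# one under the rules above; the expected grid was worked out by hand.
assert new_generation([[0, 1, 0], [0, 1, 0], [0, 1, 0]]) == [
    [0, 0, 0],
    [1, 1, 1],
    [0, 0, 0],
]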
def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new('''RGB''', (len(cells[0]), len(cells)))
        pixels = img.load()
        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
images = generate_images(GLIDER, 16)
images[0].save('''out.gif''', save_all=True, append_images=images[1:])
| 623 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__SCREAMING_SNAKE_CASE : List[Any] = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : int = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
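# Minimal stand-in for the lazy-import machinery the stub above relies on; this
# is an illustrative sketch, not the real transformers._LazyModule. Attribute
# access triggers the underlying submodule import on first use.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                real = importlib.import_module(f".{submodule}", self.__name__)
                return getattr(real, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")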
| 623 | 1 |
"""simple docstring"""
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname: str) -> str:
    # Keep only the basename, then strip the trailing "_<number>.jpg" suffix.
    stem = fname.split(os.path.sep)[-1]
    return re.search(r'''^(.*)_\d+\.jpg$''' , stem ).groups()[0]
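# Illustrative call with a hypothetical filename: the greedy group keeps
# everything before the final "_<digits>.jpg" suffix as the label.
assert extract_label("images/great_pyrenees_173.jpg") == "great_pyrenees"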
class PetsDataset(Dataset):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None ):
_lowerCamelCase = file_names
_lowerCamelCase = image_transform
_lowerCamelCase = label_to_id
def __len__( self ):
return len(self.file_names )
def __getitem__( self , lowerCamelCase__ ):
_lowerCamelCase = self.file_names[idx]
_lowerCamelCase = PIL.Image.open(lowerCamelCase__ )
_lowerCamelCase = raw_image.convert('''RGB''' )
if self.image_transform is not None:
_lowerCamelCase = self.image_transform(lowerCamelCase__ )
_lowerCamelCase = extract_label(lowerCamelCase__ )
if self.label_to_id is not None:
_lowerCamelCase = self.label_to_id[label]
return {"image": image, "label": label}
def training_function(config, args):
# Initialize accelerator
if args.with_tracking:
_lowerCamelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir )
else:
_lowerCamelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_lowerCamelCase = config['''lr''']
_lowerCamelCase = int(config['''num_epochs'''] )
_lowerCamelCase = int(config['''seed'''] )
_lowerCamelCase = int(config['''batch_size'''] )
_lowerCamelCase = config['''image_size''']
if not isinstance(lowercase_ , (list, tuple) ):
_lowerCamelCase = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , '''isdigit''' ):
if args.checkpointing_steps == "epoch":
_lowerCamelCase = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
_lowerCamelCase = int(args.checkpointing_steps )
else:
raise ValueError(
F"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" )
else:
_lowerCamelCase = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
_lowerCamelCase = os.path.split(lowercase_ )[-1].split('''.''' )[0]
accelerator.init_trackers(lowercase_ , lowercase_ )
# Grab all the image filenames
_lowerCamelCase = [os.path.join(args.data_dir , lowercase_ ) for fname in os.listdir(args.data_dir ) if fname.endswith('''.jpg''' )]
# Build the label correspondences
_lowerCamelCase = [extract_label(lowercase_ ) for fname in file_names]
_lowerCamelCase = list(set(lowercase_ ) )
id_to_label.sort()
_lowerCamelCase = {lbl: i for i, lbl in enumerate(lowercase_ )}
# Set the seed before splitting the data.
np.random.seed(lowercase_ )
torch.manual_seed(lowercase_ )
torch.cuda.manual_seed_all(lowercase_ )
# Split our filenames between train and validation
_lowerCamelCase = np.random.permutation(len(lowercase_ ) )
_lowerCamelCase = int(0.8 * len(lowercase_ ) )
_lowerCamelCase = random_perm[:cut]
_lowerCamelCase = random_perm[cut:]
# For training we use a simple RandomResizedCrop
_lowerCamelCase = Compose([RandomResizedCrop(lowercase_ , scale=(0.5, 1.0) ), ToTensor()] )
_lowerCamelCase = PetsDataset(
[file_names[i] for i in train_split] , image_transform=lowercase_ , label_to_id=lowercase_ )
# For evaluation, we use a deterministic Resize
_lowerCamelCase = Compose([Resize(lowercase_ ), ToTensor()] )
_lowerCamelCase = PetsDataset([file_names[i] for i in eval_split] , image_transform=lowercase_ , label_to_id=lowercase_ )
# Instantiate dataloaders.
_lowerCamelCase = DataLoader(lowercase_ , shuffle=lowercase_ , batch_size=lowercase_ , num_workers=4 )
_lowerCamelCase = DataLoader(lowercase_ , shuffle=lowercase_ , batch_size=lowercase_ , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_lowerCamelCase = create_model('''resnet50d''' , pretrained=lowercase_ , num_classes=len(lowercase_ ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_lowerCamelCase = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
_lowerCamelCase = False
for param in model.get_classifier().parameters():
_lowerCamelCase = True
# We normalize the batches of images to be a bit faster.
_lowerCamelCase = torch.tensor(model.default_cfg['''mean'''] )[None, :, None, None].to(accelerator.device )
_lowerCamelCase = torch.tensor(model.default_cfg['''std'''] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
_lowerCamelCase = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
_lowerCamelCase = OneCycleLR(optimizer=lowercase_ , max_lr=lowercase_ , epochs=lowercase_ , steps_per_epoch=len(lowercase_ ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = accelerator.prepare(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# We need to keep track of how many total steps we have iterated over
_lowerCamelCase = 0
# We also need to keep track of the starting epoch so files are named properly
_lowerCamelCase = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F"""Resumed from checkpoint: {args.resume_from_checkpoint}""" )
accelerator.load_state(args.resume_from_checkpoint )
_lowerCamelCase = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
_lowerCamelCase = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
_lowerCamelCase = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
_lowerCamelCase = os.path.splitext(lowercase_ )[0]
if "epoch" in training_difference:
_lowerCamelCase = int(training_difference.replace('''epoch_''' , '''''' ) ) + 1
_lowerCamelCase = None
else:
_lowerCamelCase = int(training_difference.replace('''step_''' , '''''' ) )
_lowerCamelCase = resume_step // len(lowercase_ )
resume_step -= starting_epoch * len(lowercase_ )
# Now we train the model
for epoch in range(lowercase_ , lowercase_ ):
model.train()
if args.with_tracking:
_lowerCamelCase = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
_lowerCamelCase = accelerator.skip_first_batches(lowercase_ , lowercase_ )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
_lowerCamelCase = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
_lowerCamelCase = {k: v.to(accelerator.device ) for k, v in batch.items()}
_lowerCamelCase = (batch['''image'''] - mean) / std
_lowerCamelCase = model(lowercase_ )
_lowerCamelCase = torch.nn.functional.cross_entropy(lowercase_ , batch['''label'''] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(lowercase_ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(lowercase_ , lowercase_ ):
_lowerCamelCase = F"""step_{overall_step}"""
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
_lowerCamelCase = os.path.join(args.output_dir , lowercase_ )
accelerator.save_state(lowercase_ )
model.eval()
_lowerCamelCase = 0
_lowerCamelCase = 0
for step, batch in enumerate(lowercase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
_lowerCamelCase = {k: v.to(accelerator.device ) for k, v in batch.items()}
_lowerCamelCase = (batch['''image'''] - mean) / std
with torch.no_grad():
_lowerCamelCase = model(lowercase_ )
_lowerCamelCase = outputs.argmax(dim=-1 )
_lowerCamelCase , _lowerCamelCase = accelerator.gather_for_metrics((predictions, batch['''label''']) )
_lowerCamelCase = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
_lowerCamelCase = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}: {1_00 * eval_metric:.2f}""" )
if args.with_tracking:
accelerator.log(
{
'''accuracy''': 1_00 * eval_metric,
'''train_loss''': total_loss.item() / len(lowercase_ ),
'''epoch''': epoch,
} , step=lowercase_ , )
if checkpointing_steps == "epoch":
_lowerCamelCase = F"""epoch_{epoch}"""
if args.output_dir is not None:
_lowerCamelCase = os.path.join(args.output_dir , lowercase_ )
accelerator.save_state(lowercase_ )
if args.with_tracking:
accelerator.end_training()
def main():
_lowerCamelCase = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument('''--data_dir''' , required=lowercase_ , help='''The data folder on disk.''' )
parser.add_argument('''--fp16''' , action='''store_true''' , help='''If passed, will use FP16 training.''' )
parser.add_argument(
'''--mixed_precision''' , type=lowercase_ , default=lowercase_ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
parser.add_argument(
'''--checkpointing_steps''' , type=lowercase_ , default=lowercase_ , help='''Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.''' , )
parser.add_argument(
'''--output_dir''' , type=lowercase_ , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--resume_from_checkpoint''' , type=lowercase_ , default=lowercase_ , help='''If the training should continue from a checkpoint folder.''' , )
parser.add_argument(
'''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , )
parser.add_argument(
'''--project_dir''' , type=lowercase_ , default='''logs''' , help='''Location on where to store experiment tracking logs` and relevent project information''' , )
_lowerCamelCase = parser.parse_args()
_lowerCamelCase = {'''lr''': 3e-2, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 64, '''image_size''': 2_24}
training_function(lowercase_ , lowercase_ )
if __name__ == "__main__":
main()
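# Hypothetical programmatic invocation, mirroring main() above: the Namespace
# fields match the argparse flags, and data_dir must point at a folder of
# "<label>_<n>.jpg" images (e.g. the Oxford-IIIT Pets images).
from argparse import Namespace

demo_args = Namespace(
    data_dir="images/", mixed_precision=None, cpu=True, checkpointing_steps=None,
    output_dir=".", resume_from_checkpoint=None, with_tracking=False, project_dir="logs",
)
demo_config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
training_function(demo_config, demo_args)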
| 623 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    # Creates a nested list of random floats with the given 2-D shape.
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=7 , lowerCamelCase__=4_0_0 , lowerCamelCase__=2_0_0_0 , lowerCamelCase__=1_0 , lowerCamelCase__=1_6_0 , lowerCamelCase__=8 , lowerCamelCase__=0.0 , lowerCamelCase__=4_0_0_0 , lowerCamelCase__=False , lowerCamelCase__=True , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = min_seq_length
_lowerCamelCase = max_seq_length
_lowerCamelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_lowerCamelCase = padding_value
_lowerCamelCase = sampling_rate
_lowerCamelCase = return_attention_mask
_lowerCamelCase = do_normalize
_lowerCamelCase = feature_size
_lowerCamelCase = chunk_length
_lowerCamelCase = hop_length
def snake_case__ ( self ):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def snake_case__ ( self , lowerCamelCase__=False , lowerCamelCase__=False ):
def _flatten(lowerCamelCase__ ):
return list(itertools.chain(*lowerCamelCase__ ) )
if equal_length:
_lowerCamelCase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_lowerCamelCase = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_lowerCamelCase = [np.asarray(lowerCamelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowerCamelCase_( SequenceFeatureExtractionTestMixin, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Optional[int] = WhisperFeatureExtractor if is_speech_available() else None
def snake_case__ ( self ):
_lowerCamelCase = WhisperFeatureExtractionTester(self )
def snake_case__ ( self ):
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase = feat_extract_first.save_pretrained(lowerCamelCase__ )[0]
check_json_file_has_correct_format(lowerCamelCase__ )
_lowerCamelCase = self.feature_extraction_class.from_pretrained(lowerCamelCase__ )
_lowerCamelCase = feat_extract_first.to_dict()
_lowerCamelCase = feat_extract_second.to_dict()
_lowerCamelCase = feat_extract_first.mel_filters
_lowerCamelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase = os.path.join(lowerCamelCase__ , '''feat_extract.json''' )
feat_extract_first.to_json_file(lowerCamelCase__ )
_lowerCamelCase = self.feature_extraction_class.from_json_file(lowerCamelCase__ )
_lowerCamelCase = feat_extract_first.to_dict()
_lowerCamelCase = feat_extract_second.to_dict()
_lowerCamelCase = feat_extract_first.mel_filters
_lowerCamelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
# Tests that all call wrap to encode_plus and batch_encode_plus
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_lowerCamelCase = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs]
# Test feature size
_lowerCamelCase = feature_extractor(lowerCamelCase__ , padding='''max_length''' , return_tensors='''np''' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_lowerCamelCase = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features
_lowerCamelCase = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
# Test batched
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_b in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(enc_seq_a , enc_seq_b , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_lowerCamelCase = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
_lowerCamelCase = np.asarray(lowerCamelCase__ )
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_b in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(enc_seq_a , enc_seq_b , atol=1e-3 ) )
# Test truncation required
_lowerCamelCase = [floats_list((1, x) )[0] for x in range(2_0_0 , (feature_extractor.n_samples + 5_0_0) , 2_0_0 )]
_lowerCamelCase = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs]
_lowerCamelCase = [x[: feature_extractor.n_samples] for x in speech_inputs]
_lowerCamelCase = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs_truncated]
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_b in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(enc_seq_a , enc_seq_b , atol=1e-3 ) )
def snake_case__ ( self ):
import torch
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCamelCase = np.random.rand(1_0_0 , 3_2 ).astype(np.floataa )
_lowerCamelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_lowerCamelCase = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
_lowerCamelCase = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
_lowerCamelCase = ds.sort('''id''' ).select(range(lowerCamelCase__ ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def snake_case__ ( self ):
# fmt: off
_lowerCamelCase = torch.tensor(
[
0.1_1_9_3, -0.0_9_4_6, -0.1_0_9_8, -0.0_1_9_6, 0.0_2_2_5, -0.0_6_9_0, -0.1_7_3_6, 0.0_9_5_1,
0.0_9_7_1, -0.0_8_1_7, -0.0_7_0_2, 0.0_1_6_2, 0.0_2_6_0, 0.0_0_1_7, -0.0_1_9_2, -0.1_6_7_8,
0.0_7_0_9, -0.1_8_6_7, -0.0_6_5_5, -0.0_2_7_4, -0.0_2_3_4, -0.1_8_8_4, -0.0_5_1_6, -0.0_5_5_4,
-0.0_2_7_4, -0.1_4_2_5, -0.1_4_2_3, 0.0_8_3_7, 0.0_3_7_7, -0.0_8_5_4
] )
# fmt: on
_lowerCamelCase = self._load_datasamples(1 )
_lowerCamelCase = WhisperFeatureExtractor()
_lowerCamelCase = feature_extractor(lowerCamelCase__ , return_tensors='''pt''' ).input_features
self.assertEqual(input_features.shape , (1, 8_0, 3_0_0_0) )
self.assertTrue(torch.allclose(input_features[0, 0, :3_0] , lowerCamelCase__ , atol=1e-4 ) )
def snake_case__ ( self ):
_lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCamelCase = self._load_datasamples(1 )[0]
_lowerCamelCase = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5_5_3_5 # Rescale to [0, 65535] to show issue
_lowerCamelCase = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=lowerCamelCase__ )[0]
self.assertTrue(np.all(np.mean(lowerCamelCase__ ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCamelCase__ ) - 1 ) < 1e-3 ) )
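# Hedged usage sketch matching the integration test above: the same dummy
# LibriSpeech clip should yield a (1, 80, 3000) log-mel feature tensor.
from datasets import load_dataset
from transformers import WhisperFeatureExtractor

ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
audio = ds.sort("id")[0]["audio"]["array"]

feature_extractor = WhisperFeatureExtractor()
features = feature_extractor(audio, return_tensors="pt").input_features
print(features.shape)  # torch.Size([1, 80, 3000])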
| 623 | 1 |
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def lowerCAmelCase_( lowercase_ : Optional[int] ) -> Optional[int]:
if isinstance(lowercase_ , collections.abc.Iterable ):
return x
return (x, x)
@require_flax
class VisionTextDualEncoderMixin:
'''simple docstring'''
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ):
pass
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
pass
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = np.abs((a - b) ).max()
self.assertLessEqual(lowerCamelCase__ , lowerCamelCase__ , F"""Difference between torch and flax is {diff} (>= {tol}).""" )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , **lowerCamelCase__ ):
_lowerCamelCase = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = FlaxVisionTextDualEncoderModel(lowerCamelCase__ )
_lowerCamelCase = model(input_ids=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim) )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , **lowerCamelCase__ ):
_lowerCamelCase , _lowerCamelCase = self.get_vision_text_model(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = {'''vision_model''': vision_model, '''text_model''': text_model}
_lowerCamelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase__ )
_lowerCamelCase = model(input_ids=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , **lowerCamelCase__ ):
_lowerCamelCase , _lowerCamelCase = self.get_vision_text_model(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = {'''vision_model''': vision_model, '''text_model''': text_model}
_lowerCamelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase__ )
_lowerCamelCase = model(input_ids=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ )
out_a = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase__ )
_lowerCamelCase = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase__ )
_lowerCamelCase = model(input_ids=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ )
out_b = after_output[0]
_lowerCamelCase = np.amax(np.abs(out_b - out_a ) )
self.assertLessEqual(lowerCamelCase__ , 1e-3 )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , **lowerCamelCase__ ):
_lowerCamelCase , _lowerCamelCase = self.get_vision_text_model(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = {'''vision_model''': vision_model, '''text_model''': text_model}
_lowerCamelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase__ )
_lowerCamelCase = model(
input_ids=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , output_attentions=lowerCamelCase__ )
_lowerCamelCase = output.vision_model_output.attentions
self.assertEqual(len(lowerCamelCase__ ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowerCamelCase = to_atuple(vision_model.config.image_size )
_lowerCamelCase = to_atuple(vision_model.config.patch_size )
_lowerCamelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_lowerCamelCase = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
_lowerCamelCase = output.text_model_output.attentions
self.assertEqual(len(lowerCamelCase__ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
pt_model.to(lowerCamelCase__ )
pt_model.eval()
# prepare inputs
_lowerCamelCase = inputs_dict
_lowerCamelCase = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
_lowerCamelCase = pt_model(**lowerCamelCase__ ).to_tuple()
_lowerCamelCase = fx_model(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowerCamelCase__ , pt_output.numpy() , 4e-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCamelCase__ )
_lowerCamelCase = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase__ , from_pt=lowerCamelCase__ )
_lowerCamelCase = fx_model_loaded(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowerCamelCase__ , pt_output.numpy() , 4e-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCamelCase__ )
_lowerCamelCase = VisionTextDualEncoderModel.from_pretrained(lowerCamelCase__ , from_flax=lowerCamelCase__ )
pt_model_loaded.to(lowerCamelCase__ )
pt_model_loaded.eval()
with torch.no_grad():
_lowerCamelCase = pt_model_loaded(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(lowerCamelCase__ , pt_output_loaded.numpy() , 4e-2 )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = VisionTextDualEncoderModel(lowerCamelCase__ )
_lowerCamelCase = FlaxVisionTextDualEncoderModel(lowerCamelCase__ )
_lowerCamelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCamelCase__ )
_lowerCamelCase = fx_state
self.check_pt_flax_equivalence(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = VisionTextDualEncoderModel(lowerCamelCase__ )
_lowerCamelCase = FlaxVisionTextDualEncoderModel(lowerCamelCase__ )
_lowerCamelCase = load_flax_weights_in_pytorch_model(lowerCamelCase__ , fx_model.params )
self.check_pt_flax_equivalence(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.prepare_config_and_inputs()
self.check_save_load(**lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**lowerCamelCase__ )
@is_pt_flax_cross_test
def snake_case__ ( self ):
_lowerCamelCase = self.prepare_config_and_inputs()
_lowerCamelCase = config_inputs_dict.pop('''vision_config''' )
_lowerCamelCase = config_inputs_dict.pop('''text_config''' )
_lowerCamelCase = config_inputs_dict
self.check_equivalence_pt_to_flax(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
self.check_equivalence_flax_to_pt(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
@slow
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.get_pretrained_model_and_inputs()
_lowerCamelCase = model_a(**lowerCamelCase__ )
out_a = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(lowerCamelCase__ )
_lowerCamelCase = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase__ )
_lowerCamelCase = model_a(**lowerCamelCase__ )
out_b = after_outputs[0]
_lowerCamelCase = np.amax(np.abs(out_b - out_a ) )
self.assertLessEqual(lowerCamelCase__ , 1e-5 )
@require_flax
class lowerCamelCase_( VisionTextDualEncoderMixin, unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self ):
_lowerCamelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=lowerCamelCase__ , text_from_pt=lowerCamelCase__ , )
_lowerCamelCase = 1_3
_lowerCamelCase = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
_lowerCamelCase = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
_lowerCamelCase = random_attention_mask([batch_size, 4] )
_lowerCamelCase = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = FlaxViTModel(lowerCamelCase__ )
_lowerCamelCase = FlaxBertModel(lowerCamelCase__ )
return vision_model, text_model
def snake_case__ ( self ):
_lowerCamelCase = FlaxViTModelTester(self )
_lowerCamelCase = FlaxBertModelTester(self )
_lowerCamelCase = vit_model_tester.prepare_config_and_inputs()
_lowerCamelCase = bert_model_tester.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase = vision_config_and_inputs
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class lowerCamelCase_( VisionTextDualEncoderMixin, unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self ):
_lowerCamelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-clip''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=lowerCamelCase__ , text_from_pt=lowerCamelCase__ , )
_lowerCamelCase = 1_3
_lowerCamelCase = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
_lowerCamelCase = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
_lowerCamelCase = random_attention_mask([batch_size, 4] )
_lowerCamelCase = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = FlaxCLIPVisionModel(lowerCamelCase__ )
_lowerCamelCase = FlaxBertModel(lowerCamelCase__ )
return vision_model, text_model
def snake_case__ ( self ):
_lowerCamelCase = FlaxCLIPVisionModelTester(self )
_lowerCamelCase = FlaxBertModelTester(self )
_lowerCamelCase = clip_model_tester.prepare_config_and_inputs()
_lowerCamelCase = bert_model_tester.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase = vision_config_and_inputs
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
@slow
def snake_case__ ( self ):
_lowerCamelCase = FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''' , logit_scale_init_value=1.0 )
_lowerCamelCase = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' )
_lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
_lowerCamelCase = processor(
text=['''una foto di un gatto''', '''una foto di un cane'''] , images=lowerCamelCase__ , padding=lowerCamelCase__ , return_tensors='''np''' )
_lowerCamelCase = model(**lowerCamelCase__ )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
_lowerCamelCase = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]] )
self.assertTrue(np.allclose(outputs.logits_per_image , lowerCamelCase__ , atol=1e-3 ) )
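# Hedged zero-shot scoring sketch using the same checkpoint and captions as the
# slow test above; the image path is a placeholder.
import jax
from PIL import Image
from transformers import FlaxVisionTextDualEncoderModel, VisionTextDualEncoderProcessor

model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

image = Image.open("cats.png")  # placeholder path
inputs = processor(text=["una foto di un gatto", "una foto di un cane"], images=image,
                   padding=True, return_tensors="np")
outputs = model(**inputs)
print(jax.nn.softmax(outputs.logits_per_image, axis=1))  # caption probabilities per image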
| 623 |
"""simple docstring"""
def abbr(a: str, b: str) -> bool:
    """
    Return True if ``a`` can be turned into ``b`` by uppercasing some of its
    lowercase letters and deleting all remaining lowercase letters.

    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 623 | 1 |
"""simple docstring"""
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    # Fetch the Scholar lookup page and read the citation link text.
    soup = BeautifulSoup(requests.get(base_url , params=params ).content , '''html.parser''' )
    div = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} )
    anchors = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' )
    return anchors[2].get_text()
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'''title''': (
'''Precisely geometry controlled microsupercapacitors for ultrahigh areal '''
'''capacitance, volumetric capacitance, and energy density'''
),
'''journal''': '''Chem. Mater.''',
'''volume''': 3_0,
'''pages''': '''3979-3990''',
'''year''': 2_0_1_8,
'''hl''': '''en''',
}
print(get_citation('''https://scholar.google.com/scholar_lookup''', params=params))
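Google Scholar's markup can change, so a defensive variant of the scraper is worth sketching (same `gs_ri`/`gs_fl` class names as above; the function name and `timeout` are illustrative choices):
import requests
from bs4 import BeautifulSoup
from typing import Optional
def get_citation_count(base_url: str, params: dict) -> Optional[str]:
    # Return the "Cited by N" anchor text, or None if the page layout changed.
    response = requests.get(base_url, params=params, timeout=10)
    response.raise_for_status()
    soup = BeautifulSoup(response.content, "html.parser")
    result = soup.find("div", attrs={"class": "gs_ri"})
    if result is None:
        return None
    links = result.find("div", attrs={"class": "gs_fl"})
    anchors = links.find_all("a") if links is not None else []
    return anchors[2].get_text() if len(anchors) > 2 else None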
| 623 |
"""simple docstring"""
import numpy as np
def lowerCAmelCase_( lowercase_ : np.array ) -> np.array:
return 1 / (1 + np.exp(-vector ))
def lowerCAmelCase_( lowercase_ : np.array ) -> np.array:
return vector * sigmoid(1.7_0_2 * vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
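The constant 1.702 makes `x * sigmoid(1.702 * x)` a cheap approximation to GELU; a quick standalone check against the exact erf-based form:
import numpy as np
from math import erf, sqrt
def exact_gelu(x):
    # GELU(x) = x * Phi(x), with Phi the standard normal CDF
    return x * 0.5 * (1.0 + np.vectorize(erf)(x / sqrt(2.0)))
x = np.linspace(-3, 3, 7)
approx = x / (1 + np.exp(-1.702 * x))  # x * sigmoid(1.702 * x), inlined
assert np.max(np.abs(approx - exact_gelu(x))) < 0.03  # close, but not identical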
| 623 | 1 |
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase_( A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Optional[Any] = LongformerTokenizer
lowercase__ : str = True
lowercase__ : List[str] = LongformerTokenizerFast
lowercase__ : Optional[Any] = True
def snake_case__ ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCamelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
_lowerCamelCase = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) )
_lowerCamelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
_lowerCamelCase = {'''unk_token''': '''<unk>'''}
_lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowerCamelCase__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(lowerCamelCase__ ) )
def snake_case__ ( self , **lowerCamelCase__ ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def snake_case__ ( self , **lowerCamelCase__ ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = '''lower newer'''
_lowerCamelCase = '''lower newer'''
return input_text, output_text
def snake_case__ ( self ):
_lowerCamelCase = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
_lowerCamelCase = '''lower newer'''
_lowerCamelCase = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
_lowerCamelCase = tokenizer.tokenize(lowerCamelCase__ ) # , add_prefix_space=True)
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = tokens + [tokenizer.unk_token]
_lowerCamelCase = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=lowerCamelCase__ ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=lowerCamelCase__ ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] , )
@slow
def snake_case__ ( self ):
_lowerCamelCase = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
_lowerCamelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=lowerCamelCase__ )
_lowerCamelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=lowerCamelCase__ )
_lowerCamelCase = tokenizer.encode(
'''sequence builders''' , add_special_tokens=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ )
_lowerCamelCase = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ )
_lowerCamelCase = tokenizer.build_inputs_with_special_tokens(lowerCamelCase__ )
_lowerCamelCase = tokenizer.build_inputs_with_special_tokens(lowerCamelCase__ , lowerCamelCase__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def snake_case__ ( self ):
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = '''Encode this sequence.'''
_lowerCamelCase = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
_lowerCamelCase = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ )
_lowerCamelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ )
_lowerCamelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
_lowerCamelCase = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
_lowerCamelCase = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(lowerCamelCase__ , lowerCamelCase__ )
# Testing spaces after special tokens
_lowerCamelCase = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ )} ) # mask token has a left space
_lowerCamelCase = tokenizer.convert_tokens_to_ids(lowerCamelCase__ )
_lowerCamelCase = '''Encode <mask> sequence'''
_lowerCamelCase = '''Encode <mask>sequence'''
_lowerCamelCase = tokenizer.encode(lowerCamelCase__ )
_lowerCamelCase = encoded.index(lowerCamelCase__ )
_lowerCamelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = tokenizer.encode(lowerCamelCase__ )
_lowerCamelCase = encoded.index(lowerCamelCase__ )
_lowerCamelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_lowerCamelCase = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
_lowerCamelCase = self.tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
_lowerCamelCase = '''A, <mask> AllenNLP sentence.'''
_lowerCamelCase = tokenizer_r.encode_plus(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ )
_lowerCamelCase = tokenizer_p.encode_plus(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
_lowerCamelCase = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
_lowerCamelCase = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
lowerCamelCase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
lowerCamelCase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def snake_case__ ( self ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
_lowerCamelCase = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ )
_lowerCamelCase = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
_lowerCamelCase = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , lowerCamelCase__ )
self.assertEqual(post_processor_state['''add_prefix_space'''] , lowerCamelCase__ )
self.assertEqual(post_processor_state['''trim_offsets'''] , lowerCamelCase__ )
def snake_case__ ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_lowerCamelCase = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
_lowerCamelCase = F"""{text_of_1_token} {text_of_1_token}"""
_lowerCamelCase = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__ , use_fast=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ )
_lowerCamelCase = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCamelCase__ ) + 1, len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , )
_lowerCamelCase = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__ , use_fast=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ )
_lowerCamelCase = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCamelCase__ ) + 1, len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , )
_lowerCamelCase = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__ , use_fast=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ )
_lowerCamelCase = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCamelCase__ ), len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , )
_lowerCamelCase = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__ , use_fast=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ )
_lowerCamelCase = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCamelCase__ ), len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , )
_lowerCamelCase = F""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
_lowerCamelCase = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__ , use_fast=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ )
_lowerCamelCase = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCamelCase__ ) + 1, 1 + len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , )
_lowerCamelCase = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__ , use_fast=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ )
_lowerCamelCase = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCamelCase__ ), 1 + len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , )
_lowerCamelCase = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__ , use_fast=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ )
_lowerCamelCase = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCamelCase__ ), 1 + len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , )
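The `\u0120` entries in the toy vocab above are byte-level BPE space markers: GPT-2-style tokenizers shift the space byte 0x20 by 0x100 into a printable character, which is why `\u0120low` stands for ` low` in the expected token lists. A one-line standalone check:
assert chr(0x20 + 0x100) == "\u0120" == "Ġ"  # the "Ġ" visible in the decoded token lists above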
| 623 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : str = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Dict = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[Any] = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
__SCREAMING_SNAKE_CASE : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
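For reference, a minimal sketch of the lazy-import pattern `_LazyModule` implements (simplified; the real class also handles `__dir__`, module specs, and relative imports, and the example mapping below is hypothetical):
import importlib
import types
class LazyModule(types.ModuleType):
    """Defer submodule imports until an attribute is first accessed."""
    def __init__(self, name: str, attr_to_module: dict):
        super().__init__(name)
        self._attr_to_module = attr_to_module  # e.g. {"VisionEncoderDecoderModel": "mypkg.modeling"}
    def __getattr__(self, attr: str):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(self._attr_to_module[attr])
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache, so later lookups skip __getattr__
        return value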
| 623 | 1 |
"""simple docstring"""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
__SCREAMING_SNAKE_CASE : str = logging.getLogger(__name__)
def lowerCAmelCase_( lowercase_ : Dict=2 , lowercase_ : Optional[int]=3 , lowercase_ : List[Any]=16 , lowercase_ : int = 10 , lowercase_ : int = 2 ) -> Dict:
def get_dataset(lowercase_ : Tuple ):
_lowerCamelCase = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(lowercase_ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
_lowerCamelCase = get_dataset(lowercase_ )
_lowerCamelCase = get_dataset(lowercase_ )
_lowerCamelCase = DataLoader(lowercase_ , shuffle=lowercase_ , batch_size=lowercase_ , num_workers=4 )
_lowerCamelCase = DataLoader(lowercase_ , shuffle=lowercase_ , batch_size=lowercase_ , num_workers=4 )
return (train_dataloader, valid_dataloader)
def lowerCAmelCase_( lowercase_ : int , lowercase_ : str , lowercase_ : Any , lowercase_ : str , lowercase_ : Any , lowercase_ : Tuple=None ) -> Optional[Any]:
_lowerCamelCase = []
for epoch in range(lowercase_ ):
# Train quickly
model.train()
for batch in dataloader:
_lowerCamelCase , _lowerCamelCase = batch
_lowerCamelCase = model(lowercase_ )
_lowerCamelCase = torch.nn.functional.mse_loss(lowercase_ , lowercase_ )
accelerator.backward(lowercase_ )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class lowerCamelCase_( nn.Module ):
'''simple docstring'''
def __init__( self ):
super().__init__()
_lowerCamelCase = nn.Parameter(torch.randn(1 ) )
_lowerCamelCase = nn.Parameter(torch.randn(1 ) )
def snake_case__ ( self , lowerCamelCase__ ):
return x * self.a + self.b
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_lowerCamelCase = DummyModel()
_lowerCamelCase = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
_lowerCamelCase , _lowerCamelCase = dummy_dataloaders()
_lowerCamelCase = ProjectConfiguration(total_limit=1 , project_dir=lowerCamelCase__ , automatic_checkpoint_naming=lowerCamelCase__ )
# Train baseline
_lowerCamelCase = Accelerator(project_config=lowerCamelCase__ )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = accelerator.prepare(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def snake_case__ ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_lowerCamelCase = DummyModel()
_lowerCamelCase = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
_lowerCamelCase , _lowerCamelCase = dummy_dataloaders()
# Train baseline
_lowerCamelCase = Accelerator()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = accelerator.prepare(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save initial
_lowerCamelCase = os.path.join(lowerCamelCase__ , '''initial''' )
accelerator.save_state(lowerCamelCase__ )
((_lowerCamelCase) , (_lowerCamelCase)) = model.a.item(), model.b.item()
_lowerCamelCase = optimizer.state_dict()
_lowerCamelCase = train(3 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
((_lowerCamelCase) , (_lowerCamelCase)) = model.a.item(), model.b.item()
_lowerCamelCase = optimizer.state_dict()
# Train partially
set_seed(4_2 )
_lowerCamelCase = DummyModel()
_lowerCamelCase = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
_lowerCamelCase , _lowerCamelCase = dummy_dataloaders()
_lowerCamelCase = Accelerator()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = accelerator.prepare(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
accelerator.load_state(lowerCamelCase__ )
((_lowerCamelCase) , (_lowerCamelCase)) = model.a.item(), model.b.item()
_lowerCamelCase = optimizer.state_dict()
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = train(2 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save everything
_lowerCamelCase = os.path.join(lowerCamelCase__ , '''checkpoint''' )
accelerator.save_state(lowerCamelCase__ )
# Load everything back in and make sure all states work
accelerator.load_state(lowerCamelCase__ )
test_rands += train(1 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
((_lowerCamelCase) , (_lowerCamelCase)) = model.a.item(), model.b.item()
_lowerCamelCase = optimizer.state_dict()
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_lowerCamelCase = DummyModel()
_lowerCamelCase = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
_lowerCamelCase , _lowerCamelCase = dummy_dataloaders()
_lowerCamelCase = ProjectConfiguration(automatic_checkpoint_naming=lowerCamelCase__ )
# Train baseline
_lowerCamelCase = Accelerator(project_dir=lowerCamelCase__ , project_config=lowerCamelCase__ )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = accelerator.prepare(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save initial
accelerator.save_state()
((_lowerCamelCase) , (_lowerCamelCase)) = model.a.item(), model.b.item()
_lowerCamelCase = optimizer.state_dict()
_lowerCamelCase = train(3 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
((_lowerCamelCase) , (_lowerCamelCase)) = model.a.item(), model.b.item()
_lowerCamelCase = optimizer.state_dict()
# Train partially
set_seed(4_2 )
_lowerCamelCase = DummyModel()
_lowerCamelCase = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
_lowerCamelCase , _lowerCamelCase = dummy_dataloaders()
_lowerCamelCase = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=lowerCamelCase__ )
_lowerCamelCase = Accelerator(project_dir=lowerCamelCase__ , project_config=lowerCamelCase__ )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = accelerator.prepare(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
accelerator.load_state(os.path.join(lowerCamelCase__ , '''checkpoints''' , '''checkpoint_0''' ) )
((_lowerCamelCase) , (_lowerCamelCase)) = model.a.item(), model.b.item()
_lowerCamelCase = optimizer.state_dict()
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = train(2 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(lowerCamelCase__ , '''checkpoints''' , '''checkpoint_1''' ) )
test_rands += train(1 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
((_lowerCamelCase) , (_lowerCamelCase)) = model.a.item(), model.b.item()
_lowerCamelCase = optimizer.state_dict()
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = torch.tensor([1, 2, 3] )
_lowerCamelCase = torch.tensor([2, 3, 4] )
_lowerCamelCase = DummyModel()
_lowerCamelCase = torch.optim.Adam(net.parameters() )
_lowerCamelCase = Accelerator()
with self.assertRaises(lowerCamelCase__ ) as ve:
accelerator.register_for_checkpointing(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = str(ve.exception )
self.assertTrue('''Item at index 0''' in message )
self.assertTrue('''Item at index 1''' in message )
self.assertFalse('''Item at index 2''' in message )
self.assertFalse('''Item at index 3''' in message )
def snake_case__ ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_lowerCamelCase = DummyModel()
_lowerCamelCase = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
_lowerCamelCase = torch.optim.lr_scheduler.StepLR(lowerCamelCase__ , step_size=1 , gamma=0.9_9 )
_lowerCamelCase , _lowerCamelCase = dummy_dataloaders()
_lowerCamelCase = ProjectConfiguration(automatic_checkpoint_naming=lowerCamelCase__ )
# Train baseline
_lowerCamelCase = Accelerator(project_dir=lowerCamelCase__ , project_config=lowerCamelCase__ )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = accelerator.prepare(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save initial
accelerator.save_state()
_lowerCamelCase = scheduler.state_dict()
train(3 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
self.assertNotEqual(lowerCamelCase__ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(lowerCamelCase__ , '''checkpoints''' , '''checkpoint_0''' ) )
self.assertEqual(lowerCamelCase__ , scheduler.state_dict() )
def snake_case__ ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
_lowerCamelCase = DummyModel()
_lowerCamelCase = ProjectConfiguration(automatic_checkpoint_naming=lowerCamelCase__ , total_limit=2 )
# Train baseline
_lowerCamelCase = Accelerator(project_dir=lowerCamelCase__ , project_config=lowerCamelCase__ )
_lowerCamelCase = accelerator.prepare(lowerCamelCase__ )
# Save 11 states; with total_limit=2 only the last two should remain:
for _ in range(1_1 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(lowerCamelCase__ , '''checkpoints''' , '''checkpoint_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCamelCase__ , '''checkpoints''' , '''checkpoint_9''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCamelCase__ , '''checkpoints''' , '''checkpoint_10''' ) ) )
@require_cuda
def snake_case__ ( self ):
_lowerCamelCase = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(lowerCamelCase__ , env=os.environ.copy() )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[int] = '''/tmp/accelerate/state_checkpointing'''
__SCREAMING_SNAKE_CASE : str = DummyModel()
__SCREAMING_SNAKE_CASE : int = torch.optim.Adam(params=model.parameters(), lr=1e-3)
__SCREAMING_SNAKE_CASE : int = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] = dummy_dataloaders()
__SCREAMING_SNAKE_CASE : int = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
__SCREAMING_SNAKE_CASE : str = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='''no''')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : int = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the initial optimizer state is loaded on the GPU
for group in optimizer.param_groups:
__SCREAMING_SNAKE_CASE : Optional[Any] = group['''params'''][0].device
break
assert param_device.type == accelerator.device.type
__SCREAMING_SNAKE_CASE : Optional[Any] = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''cpu''')
for group in optimizer.param_groups:
__SCREAMING_SNAKE_CASE : Tuple = group['''params'''][0].device
break
assert (
param_device.type == torch.device('''cpu''').type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''on_device''')
for group in optimizer.param_groups:
__SCREAMING_SNAKE_CASE : Optional[int] = group['''params'''][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='''Unsupported optimizer map location passed'''):
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''invalid''')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
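Stripped of Accelerate, the round trip these tests verify is just serializing every stateful component and restoring it later; a plain-PyTorch sketch (not Accelerate's actual implementation, which additionally tracks RNG state, the grad scaler, and registered objects):
import torch
def save_training_state(path, model, optimizer, scheduler=None):
    state = {"model": model.state_dict(), "optimizer": optimizer.state_dict()}
    if scheduler is not None:
        state["scheduler"] = scheduler.state_dict()
    torch.save(state, path)
def load_training_state(path, model, optimizer, scheduler=None, map_location="cpu"):
    state = torch.load(path, map_location=map_location)
    model.load_state_dict(state["model"])
    optimizer.load_state_dict(state["optimizer"])
    if scheduler is not None and "scheduler" in state:
        scheduler.load_state_dict(state["scheduler"])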
| 623 |
"""simple docstring"""
from __future__ import annotations
from math import pow, sqrt
def lowerCAmelCase_( lowercase_ : float , lowercase_ : float , lowercase_ : float ) -> dict[str, float]:
if (resistance, reactance, impedance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if resistance == 0:
return {"resistance": sqrt(pow(lowercase_ , 2 ) - pow(lowercase_ , 2 ) )}
elif reactance == 0:
return {"reactance": sqrt(pow(lowercase_ , 2 ) - pow(lowercase_ , 2 ) )}
elif impedance == 0:
return {"impedance": sqrt(pow(lowercase_ , 2 ) + pow(lowercase_ , 2 ) )}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
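A quick worked example of the impedance triangle Z^2 = R^2 + X^2, written as a self-contained, de-obfuscated copy of the function above and exercised on a 3-4-5 triangle:
from math import sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict:
    # same contract as the function above, renamed for illustration
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if impedance == 0:
        return {"impedance": sqrt(resistance**2 + reactance**2)}
    if resistance == 0:
        return {"resistance": sqrt(impedance**2 - reactance**2)}
    return {"reactance": sqrt(impedance**2 - resistance**2)}
assert electrical_impedance(3, 4, 0) == {"impedance": 5.0}
assert electrical_impedance(0, 4, 5) == {"resistance": 3.0}
assert electrical_impedance(3, 0, 5) == {"reactance": 4.0}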
| 623 | 1 |
"""simple docstring"""
class lowerCamelCase_:
'''simple docstring'''
def __init__( self ):
_lowerCamelCase = 0
_lowerCamelCase = 0
_lowerCamelCase = {}
def snake_case__ ( self , lowerCamelCase__ ):
if vertex not in self.adjacency:
_lowerCamelCase = {}
self.num_vertices += 1
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
self.add_vertex(lowerCamelCase__ )
self.add_vertex(lowerCamelCase__ )
if head == tail:
return
_lowerCamelCase = weight
_lowerCamelCase = weight
def snake_case__ ( self ):
_lowerCamelCase = self.get_edges()
for edge in edges:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = edge
edges.remove((tail, head, weight) )
for i in range(len(lowerCamelCase__ ) ):
_lowerCamelCase = list(edges[i] )
edges.sort(key=lambda lowerCamelCase__ : e[2] )
for i in range(len(lowerCamelCase__ ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
_lowerCamelCase = edges[i][2] + 1
for edge in edges:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = edge
_lowerCamelCase = weight
_lowerCamelCase = weight
def __str__( self ):
_lowerCamelCase = ''''''
for tail in self.adjacency:
for head in self.adjacency[tail]:
_lowerCamelCase = self.adjacency[head][tail]
string += F"""{head} -> {tail} == {weight}\n"""
return string.rstrip('''\n''' )
def snake_case__ ( self ):
_lowerCamelCase = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def snake_case__ ( self ):
return self.adjacency.keys()
@staticmethod
def snake_case__ ( lowerCamelCase__=None , lowerCamelCase__=None ):
_lowerCamelCase = Graph()
if vertices is None:
_lowerCamelCase = []
if edges is None:
_lowerCamelCase = []
for vertex in vertices:
g.add_vertex(lowerCamelCase__ )
for edge in edges:
g.add_edge(*lowerCamelCase__ )
return g
class lowerCamelCase_:
'''simple docstring'''
def __init__( self ):
_lowerCamelCase = {}
_lowerCamelCase = {}
def __len__( self ):
return len(self.parent )
def snake_case__ ( self , lowerCamelCase__ ):
if item in self.parent:
return self.find(lowerCamelCase__ )
_lowerCamelCase = item
_lowerCamelCase = 0
return item
def snake_case__ ( self , lowerCamelCase__ ):
if item not in self.parent:
return self.make_set(lowerCamelCase__ )
if item != self.parent[item]:
_lowerCamelCase = self.find(self.parent[item] )
return self.parent[item]
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = self.find(lowerCamelCase__ )
_lowerCamelCase = self.find(lowerCamelCase__ )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
_lowerCamelCase = roota
return roota
if self.rank[roota] < self.rank[roota]:
_lowerCamelCase = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
_lowerCamelCase = roota
return roota
return None
@staticmethod
def snake_case__ ( lowerCamelCase__ ):
_lowerCamelCase = graph.num_vertices
_lowerCamelCase = Graph.UnionFind()
_lowerCamelCase = []
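# one Boruvka round: pick the cheapest edge leaving every component, add them all at once, and repeat until a single component remains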
while num_components > 1:
_lowerCamelCase = {}
for vertex in graph.get_vertices():
_lowerCamelCase = -1
_lowerCamelCase = graph.get_edges()
for edge in edges:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = edge
edges.remove((tail, head, weight) )
for edge in edges:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = edge
_lowerCamelCase = union_find.find(lowerCamelCase__ )
_lowerCamelCase = union_find.find(lowerCamelCase__ )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_lowerCamelCase = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_lowerCamelCase = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = cheap_edge[vertex]
if union_find.find(lowerCamelCase__ ) != union_find.find(lowerCamelCase__ ):
union_find.union(lowerCamelCase__ , lowerCamelCase__ )
mst_edges.append(cheap_edge[vertex] )
_lowerCamelCase = num_components - 1
_lowerCamelCase = Graph.build(edges=lowerCamelCase__ )
return mst
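The `UnionFind` above drives Boruvka's merging; for comparison, a compact standalone disjoint-set with path halving and union by rank (sketch):
class UnionFind:
    def __init__(self, n: int):
        self.parent = list(range(n))
        self.rank = [0] * n
    def find(self, x: int) -> int:
        while self.parent[x] != x:
            self.parent[x] = self.parent[self.parent[x]]  # path halving
            x = self.parent[x]
        return x
    def union(self, a: int, b: int) -> bool:
        ra, rb = self.find(a), self.find(b)
        if ra == rb:
            return False
        if self.rank[ra] < self.rank[rb]:
            ra, rb = rb, ra
        self.parent[rb] = ra
        if self.rank[ra] == self.rank[rb]:
            self.rank[ra] += 1
        return True
uf = UnionFind(4)
assert uf.union(0, 1) and uf.union(2, 3) and uf.union(1, 3)
assert not uf.union(0, 2)  # already in one component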
| 623 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
def lowerCAmelCase_( lowercase_ : list[Any] ) -> None:
create_state_space_tree(lowercase_ , [] , 0 )
def lowerCAmelCase_( lowercase_ : list[Any] , lowercase_ : list[Any] , lowercase_ : int ) -> None:
if index == len(lowercase_ ):
print(lowercase_ )
return
create_state_space_tree(lowercase_ , lowercase_ , index + 1 )
current_subsequence.append(sequence[index] )
create_state_space_tree(lowercase_ , lowercase_ , index + 1 )
current_subsequence.pop()
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['''A''', '''B''', '''C'''])
generate_all_subsequences(seq)
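The recursion above enumerates the power set in O(2^n); an itertools-based equivalent is handy as a cross-check (sketch):
from itertools import combinations
def all_subsequences(seq):
    # every subsequence = choose k indices in order, for k = 0..len(seq)
    return [list(c) for k in range(len(seq) + 1) for c in combinations(seq, k)]
assert len(all_subsequences([3, 1, 2, 4])) == 2 ** 4  # 16 subsequences, incl. []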
| 623 | 1 |
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
__SCREAMING_SNAKE_CASE : str = tuple[int, int]
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = vertices
_lowerCamelCase = {
(min(lowerCamelCase__ ), max(lowerCamelCase__ )): weight for edge, weight in edges.items()
}
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ):
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
_lowerCamelCase = weight
def snake_case__ ( self ):
_lowerCamelCase = Graph({min(self.vertices )} , {} )
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
while len(subgraph.vertices ) < len(self.vertices ):
_lowerCamelCase = max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
_lowerCamelCase = edge
_lowerCamelCase = weight
subgraph.add_edge(lowerCamelCase__ , lowerCamelCase__ )
return subgraph
def lowerCAmelCase_( lowercase_ : str = "p107_network.txt" ) -> int:
_lowerCamelCase = os.path.abspath(os.path.dirname(lowercase_ ) )
_lowerCamelCase = os.path.join(lowercase_ , lowercase_ )
_lowerCamelCase = {}
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
with open(lowercase_ ) as f:
_lowerCamelCase = f.read().strip().split('''\n''' )
_lowerCamelCase = [line.split(''',''' ) for line in data]
for edgea in range(1 , len(lowercase_ ) ):
for edgea in range(lowercase_ ):
if adjacency_matrix[edgea][edgea] != "-":
_lowerCamelCase = int(adjacency_matrix[edgea][edgea] )
_lowerCamelCase = Graph(set(range(len(lowercase_ ) ) ) , lowercase_ )
_lowerCamelCase = graph.prims_algorithm()
_lowerCamelCase = sum(graph.edges.values() )
_lowerCamelCase = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(F"""{solution() = }""")
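The `prims_algorithm` above rescans every edge for each added vertex, which is O(V*E); a heap-based Prim sketch (the adjacency-list input format is an assumption, not the file format used above) brings this to O(E log V):
import heapq
def prim_mst_weight(adj: dict) -> int:
    # adj: vertex -> list of (weight, neighbor); assumes a connected graph
    start = next(iter(adj))
    seen = {start}
    heap = list(adj[start])
    heapq.heapify(heap)
    total = 0
    while heap and len(seen) < len(adj):
        weight, v = heapq.heappop(heap)
        if v in seen:
            continue
        seen.add(v)
        total += weight
        for edge in adj[v]:
            if edge[1] not in seen:
                heapq.heappush(heap, edge)
    return total
# triangle 0-1 (1), 1-2 (2), 0-2 (3): the MST keeps the two cheap edges
adj = {0: [(1, 1), (3, 2)], 1: [(1, 0), (2, 2)], 2: [(3, 0), (2, 1)]}
assert prim_mst_weight(adj) == 3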
| 623 |
"""simple docstring"""
import warnings
from .generation import TFGenerationMixin
class lowerCamelCase_( A__ ):
'''simple docstring'''
warnings.warn(
'Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will '
'be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.', A__, )
| 623 | 1 |
"""simple docstring"""
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ ):
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
_lowerCamelCase = deepcopy(lowerCamelCase__ )
elif os.path.exists(lowerCamelCase__ ):
with io.open(lowerCamelCase__ , '''r''' , encoding='''utf-8''' ) as f:
_lowerCamelCase = json.load(lowerCamelCase__ )
else:
try:
_lowerCamelCase = base64.urlsafe_b64decode(lowerCamelCase__ ).decode('''utf-8''' )
_lowerCamelCase = json.loads(lowerCamelCase__ )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
F"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""" )
_lowerCamelCase = config
self.set_stage_and_offload()
def snake_case__ ( self ):
# zero stage - this is done as early as possible, before model is created, to allow
# ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
# during ``zero.Init()`` which needs to know the dtype, and some other hparams.
_lowerCamelCase = self.get_value('''zero_optimization.stage''' , -1 )
# offload
_lowerCamelCase = False
if self.is_zeroa() or self.is_zeroa():
_lowerCamelCase = set(['''cpu''', '''nvme'''] )
_lowerCamelCase = set(
[
self.get_value('''zero_optimization.offload_optimizer.device''' ),
self.get_value('''zero_optimization.offload_param.device''' ),
] )
if len(offload_devices & offload_devices_valid ) > 0:
_lowerCamelCase = True
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = self.config
# find the config node of interest if it exists
_lowerCamelCase = ds_key_long.split('''.''' )
_lowerCamelCase = nodes.pop()
for node in nodes:
_lowerCamelCase = config.get(lowerCamelCase__ )
if config is None:
return None, ds_key
return config, ds_key
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__=None ):
_lowerCamelCase , _lowerCamelCase = self.find_config_node(lowerCamelCase__ )
if config is None:
return default
return config.get(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__=False ):
_lowerCamelCase = self.config
# find the config node of interest if it exists
_lowerCamelCase = ds_key_long.split('''.''' )
for node in nodes:
_lowerCamelCase = config
_lowerCamelCase = config.get(lowerCamelCase__ )
if config is None:
if must_exist:
raise ValueError(F"""Can't find {ds_key_long} entry in the config: {self.config}""" )
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = self.get_value(lowerCamelCase__ )
return False if value is None else bool(lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = self.get_value(lowerCamelCase__ )
return False if value is None else not bool(lowerCamelCase__ )
def snake_case__ ( self ):
return self._stage == 2
def snake_case__ ( self ):
return self._stage == 3
def snake_case__ ( self ):
return self._offload
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ ):
_lowerCamelCase = engine
def snake_case__ ( self , lowerCamelCase__ , **lowerCamelCase__ ):
# runs backpropagation and handles mixed precision
self.engine.backward(lowerCamelCase__ , **lowerCamelCase__ )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class lowerCamelCase_( A__ ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ ):
super().__init__(lowerCamelCase__ , device_placement=lowerCamelCase__ , scaler=lowerCamelCase__ )
_lowerCamelCase = hasattr(self.optimizer , '''overflow''' )
def snake_case__ ( self , lowerCamelCase__=None ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def snake_case__ ( self ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def snake_case__ ( self ):
if self.__has_overflow__:
return self.optimizer.overflow
return False
class lowerCamelCase_( A__ ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__ ):
super().__init__(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=0.0_0_1 , lowerCamelCase__=0 , **lowerCamelCase__ ):
_lowerCamelCase = params
_lowerCamelCase = lr
_lowerCamelCase = weight_decay
_lowerCamelCase = kwargs
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=0 , **lowerCamelCase__ ):
_lowerCamelCase = optimizer
_lowerCamelCase = total_num_steps
_lowerCamelCase = warmup_num_steps
_lowerCamelCase = kwargs
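`find_config_node`/`get_value` above amount to a dotted-path lookup into the nested DeepSpeed JSON; a standalone sketch of the same idea:
def get_by_dotted_key(config: dict, dotted_key: str, default=None):
    # walk "zero_optimization.offload_param.device" one level at a time
    node = config
    for part in dotted_key.split("."):
        if not isinstance(node, dict) or part not in node:
            return default
        node = node[part]
    return node
cfg = {"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}}
assert get_by_dotted_key(cfg, "zero_optimization.offload_param.device") == "cpu"
assert get_by_dotted_key(cfg, "zero_optimization.stage") == 3
assert get_by_dotted_key(cfg, "optimizer.type", default="adam") == "adam"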
| 623 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowerCAmelCase_( lowercase_ : int , lowercase_ : List[Any] , lowercase_ : List[Any] , lowercase_ : Tuple , lowercase_ : List[str] ) -> Dict:
# Load configuration defined in the metadata file
with open(lowercase_ ) as metadata_file:
_lowerCamelCase = json.load(lowercase_ )
_lowerCamelCase = LukeConfig(use_entity_aware_attention=lowercase_ , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
_lowerCamelCase = torch.load(lowercase_ , map_location='''cpu''' )
# Load the entity vocab file
_lowerCamelCase = load_entity_vocab(lowercase_ )
_lowerCamelCase = RobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
_lowerCamelCase = AddedToken('''<ent>''' , lstrip=lowercase_ , rstrip=lowercase_ )
_lowerCamelCase = AddedToken('''<ent2>''' , lstrip=lowercase_ , rstrip=lowercase_ )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(lowercase_ )
with open(os.path.join(lowercase_ , LukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(lowercase_ , lowercase_ )
_lowerCamelCase = LukeTokenizer.from_pretrained(lowercase_ )
# Initialize the embeddings of the special tokens
_lowerCamelCase = state_dict['''embeddings.word_embeddings.weight''']
_lowerCamelCase = word_emb[tokenizer.convert_tokens_to_ids(['''@'''] )[0]].unsqueeze(0 )
_lowerCamelCase = word_emb[tokenizer.convert_tokens_to_ids(['''#'''] )[0]].unsqueeze(0 )
_lowerCamelCase = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_lowerCamelCase = F"""encoder.layer.{layer_index}.attention.self."""
_lowerCamelCase = state_dict[prefix + matrix_name]
_lowerCamelCase = state_dict[prefix + matrix_name]
_lowerCamelCase = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_lowerCamelCase = state_dict['''entity_embeddings.entity_embeddings.weight''']
_lowerCamelCase = entity_emb[entity_vocab['''[MASK]''']]
_lowerCamelCase = LukeModel(config=lowercase_ ).eval()
_lowerCamelCase , _lowerCamelCase = model.load_state_dict(lowercase_ , strict=lowercase_ )
if not (len(lowercase_ ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(F"""Missing keys {", ".join(lowercase_ )}. Expected only missing embeddings.position_ids""" )
if not (all(key.startswith('''entity_predictions''' ) or key.startswith('''lm_head''' ) for key in unexpected_keys )):
raise ValueError(
'''Unexpected keys'''
F""" {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}""" )
# Check outputs
_lowerCamelCase = LukeTokenizer.from_pretrained(lowercase_ , task='''entity_classification''' )
_lowerCamelCase = (
'''Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'''
''' new world number one avoid a humiliating second- round exit at Wimbledon .'''
)
_lowerCamelCase = (39, 42)
_lowerCamelCase = tokenizer(lowercase_ , entity_spans=[span] , add_prefix_space=lowercase_ , return_tensors='''pt''' )
_lowerCamelCase = model(**lowercase_ )
# Verify word hidden states
if model_size == "large":
_lowerCamelCase = torch.Size((1, 42, 10_24) )
_lowerCamelCase = torch.tensor(
[[0.0_1_3_3, 0.0_8_6_5, 0.0_0_9_5], [0.3_0_9_3, -0.2_5_7_6, -0.7_4_1_8], [-0.1_7_2_0, -0.2_1_1_7, -0.2_8_6_9]] )
else: # base
_lowerCamelCase = torch.Size((1, 42, 7_68) )
_lowerCamelCase = torch.tensor([[0.0_0_3_7, 0.1_3_6_8, -0.0_0_9_1], [0.1_0_9_9, 0.3_3_2_9, -0.1_0_9_5], [0.0_7_6_5, 0.5_3_3_5, 0.1_1_7_9]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase_ , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
_lowerCamelCase = torch.Size((1, 1, 10_24) )
_lowerCamelCase = torch.tensor([[0.0_4_6_6, -0.0_1_0_6, -0.0_1_7_9]] )
else: # base
_lowerCamelCase = torch.Size((1, 1, 7_68) )
_lowerCamelCase = torch.tensor([[0.1_4_5_7, 0.1_0_4_4, 0.0_1_7_4]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowercase_ , atol=1e-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(lowercase_ ) )
model.save_pretrained(lowercase_ )
def lowerCAmelCase_( lowercase_ : Optional[Any] ) -> Any:
_lowerCamelCase = {}
with open(lowercase_ , '''r''' , encoding='''utf-8''' ) as f:
for index, line in enumerate(lowercase_ ):
_lowerCamelCase , _lowerCamelCase = line.rstrip().split('''\t''' )
_lowerCamelCase = index
return entity_vocab
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
__SCREAMING_SNAKE_CASE : int = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
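The embedding-resize step above (growing `word_embeddings.weight` by rows seeded from the `@` and `#` tokens) reduces to concatenating copied rows; a minimal torch sketch with hypothetical sizes:
import torch
def extend_word_embeddings(word_emb: torch.Tensor, seed_rows: list) -> torch.Tensor:
    # word_emb: (vocab_size, hidden); append one copied row per new special token
    new_rows = [word_emb[i].unsqueeze(0) for i in seed_rows]
    return torch.cat([word_emb, *new_rows], dim=0)
emb = torch.randn(10, 4)
extended = extend_word_embeddings(emb, seed_rows=[3, 7])  # e.g. the ids of "@" and "#"
assert extended.shape == (12, 4)
assert torch.equal(extended[10], emb[3]) and torch.equal(extended[11], emb[7])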
| 623 | 1 |
"""simple docstring"""
from __future__ import annotations
__SCREAMING_SNAKE_CASE : Tuple = list[list[int]]
# assigning initial values to the grid
__SCREAMING_SNAKE_CASE : Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
__SCREAMING_SNAKE_CASE : Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def lowerCAmelCase_( lowercase_ : Matrix , lowercase_ : int , lowercase_ : int , lowercase_ : int ) -> bool:
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def lowerCAmelCase_( lowercase_ : Matrix ) -> tuple[int, int] | None:
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def lowerCAmelCase_( lowercase_ : Matrix ) -> Matrix | None:
if location := find_empty_location(lowercase_ ):
_lowerCamelCase , _lowerCamelCase = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
_lowerCamelCase = digit
if sudoku(lowercase_ ) is not None:
return grid
_lowerCamelCase = 0
return None
def lowerCAmelCase_( lowercase_ : Matrix ) -> None:
for row in grid:
for cell in row:
print(lowercase_ , end=''' ''' )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 2_0)
print_solution(example_grid)
print('''\nExample grid solution:''')
__SCREAMING_SNAKE_CASE : Any = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''')
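The box check in `is_safe` relies on `row - row % 3` snapping any cell to the top-left corner of its 3x3 box; a quick standalone verification of that arithmetic:
# every cell (r, c) maps into exactly one box anchored at a multiple of 3
for r in range(9):
    for c in range(9):
        top, left = r - r % 3, c - c % 3
        assert top in (0, 3, 6) and left in (0, 3, 6)
        assert top <= r < top + 3 and left <= c < left + 3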
| 623 |
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=1_3 , lowerCamelCase__=3_0 , lowerCamelCase__=2 , lowerCamelCase__=3 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=3_2 , lowerCamelCase__=2 , lowerCamelCase__=4 , lowerCamelCase__=3_7 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=1_0 , lowerCamelCase__=0.0_2 , lowerCamelCase__=3 , lowerCamelCase__=0.6 , lowerCamelCase__=None , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = image_size
_lowerCamelCase = patch_size
_lowerCamelCase = num_channels
_lowerCamelCase = is_training
_lowerCamelCase = use_labels
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = type_sequence_label_size
_lowerCamelCase = initializer_range
_lowerCamelCase = mask_ratio
_lowerCamelCase = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
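# example with the defaults above: image_size=30, patch_size=2 -> num_patches = (30 // 2) ** 2 = 225; with mask_ratio=0.6, seq length = ceil(0.4 * 226) = 91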
_lowerCamelCase = (image_size // patch_size) ** 2
_lowerCamelCase = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def snake_case__ ( self ):
_lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase = None
if self.use_labels:
_lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase = self.get_config()
return config, pixel_values, labels
def snake_case__ ( self ):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = TFViTMAEModel(config=lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , training=lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = TFViTMAEForPreTraining(lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , training=lowerCamelCase__ )
# expected sequence length = num_patches
_lowerCamelCase = (self.image_size // self.patch_size) ** 2
_lowerCamelCase = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
_lowerCamelCase = 1
_lowerCamelCase = TFViTMAEForPreTraining(lowerCamelCase__ )
_lowerCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase = model(lowerCamelCase__ , training=lowerCamelCase__ )
_lowerCamelCase = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def snake_case__ ( self ):
_lowerCamelCase = self.prepare_config_and_inputs()
((_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase)) = config_and_inputs
_lowerCamelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class lowerCamelCase_( A__, A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Optional[Any] = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
lowercase__ : Dict = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {}
lowercase__ : Optional[Any] = False
lowercase__ : Union[str, Any] = False
lowercase__ : str = False
lowercase__ : List[str] = False
def snake_case__ ( self ):
_lowerCamelCase = TFViTMAEModelTester(self )
_lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=3_7 )
def snake_case__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
_lowerCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ , tf.keras.layers.Layer ) )
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase = [*signature.parameters.keys()]
_lowerCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCamelCase__ )
def snake_case__ ( self ):
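# Passing the inputs as a single dict and as keyword arguments should yield the same output
# once the random mask is pinned by the explicit noise tensor.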
# make the mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
_lowerCamelCase = copy.deepcopy(self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
_lowerCamelCase = model(**lowerCamelCase__ , noise=lowerCamelCase__ )
_lowerCamelCase = outputs_dict[0].numpy()
_lowerCamelCase = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 )
def snake_case__ ( self ):
# make the mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(lowerCamelCase__ ):
_lowerCamelCase = {}
for k, v in inputs_dict.items():
if tf.is_tensor(lowerCamelCase__ ):
_lowerCamelCase = v.numpy()
else:
_lowerCamelCase = np.array(lowerCamelCase__ )
return inputs_np_dict
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = prepare_numpy_arrays(lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
_lowerCamelCase = model(**lowerCamelCase__ , noise=lowerCamelCase__ )
self.assert_outputs_same(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
# make masks reproducible
np.random.seed(2 )
_lowerCamelCase = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_lowerCamelCase = tf.constant(lowerCamelCase__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
_lowerCamelCase = tf_noise
super().check_pt_tf_models(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
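# Collect every keras-serializable ...MainLayer class paired with a model class, wrap it in a
# functional Keras model, and check that it survives an h5 save/load round trip.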
# make mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(lowerCamelCase__ )
if module_member_name.endswith('''MainLayer''' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )]
for module_member in (getattr(lowerCamelCase__ , lowerCamelCase__ ),)
if isinstance(lowerCamelCase__ , lowerCamelCase__ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(lowerCamelCase__ , '''_keras_serializable''' , lowerCamelCase__ )
}
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_lowerCamelCase = tf.convert_to_tensor(lowerCamelCase__ )
inputs_dict.update({'''noise''': noise} )
for main_layer_class in tf_main_layer_classes:
_lowerCamelCase = main_layer_class(lowerCamelCase__ )
_lowerCamelCase = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
_lowerCamelCase = tf.keras.Model(lowerCamelCase__ , outputs=main_layer(lowerCamelCase__ ) )
_lowerCamelCase = model(lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase = os.path.join(lowerCamelCase__ , '''keras_model.h5''' )
model.save(lowerCamelCase__ )
_lowerCamelCase = tf.keras.models.load_model(
lowerCamelCase__ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(lowerCamelCase__ , tf.keras.Model )
_lowerCamelCase = model(lowerCamelCase__ )
self.assert_outputs_same(lowerCamelCase__ , lowerCamelCase__ )
@slow
def snake_case__ ( self ):
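# Outputs should be unchanged after a save_pretrained / from_pretrained round trip with a fixed noise mask.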
# make mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
if model_class.__name__ == "TFViTMAEModel":
_lowerCamelCase = outputs.last_hidden_state.numpy()
_lowerCamelCase = 0
else:
_lowerCamelCase = outputs.logits.numpy()
_lowerCamelCase = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase__ , saved_model=lowerCamelCase__ )
_lowerCamelCase = model_class.from_pretrained(lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
if model_class.__name__ == "TFViTMAEModel":
_lowerCamelCase = after_outputs['''last_hidden_state'''].numpy()
_lowerCamelCase = 0
else:
_lowerCamelCase = after_outputs['''logits'''].numpy()
_lowerCamelCase = 0
_lowerCamelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCamelCase__ , 1e-5 )
def snake_case__ ( self ):
# make mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
_lowerCamelCase = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(lowerCamelCase__ )
_lowerCamelCase = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
_lowerCamelCase = model_class.from_config(model.config )
_lowerCamelCase = new_model(lowerCamelCase__ ) # Build model
new_model.set_weights(model.get_weights() )
_lowerCamelCase = new_model(lowerCamelCase__ , noise=lowerCamelCase__ )
self.assert_outputs_same(lowerCamelCase__ , lowerCamelCase__ )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def snake_case__ ( self ):
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def snake_case__ ( self ):
pass
@slow
def snake_case__ ( self ):
_lowerCamelCase = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(lowerCamelCase__ )
def lowerCAmelCase_( ) -> List[Any]:
_lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case__ ( self ):
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def snake_case__ ( self ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
_lowerCamelCase = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' )
_lowerCamelCase = self.default_image_processor
_lowerCamelCase = prepare_img()
_lowerCamelCase = image_processor(images=lowerCamelCase__ , return_tensors='''tf''' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
_lowerCamelCase = ViTMAEConfig()
_lowerCamelCase = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(1, num_patches) )
# forward pass
_lowerCamelCase = model(**lowerCamelCase__ , noise=lowerCamelCase__ )
# verify the logits
_lowerCamelCase = tf.convert_to_tensor([1, 1_9_6, 7_6_8] )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
_lowerCamelCase = tf.convert_to_tensor(
[[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
| 623 | 1 |
"""simple docstring"""
from math import isqrt
def lowerCAmelCase_( lowercase_ : int ) -> bool:
return all(number % divisor != 0 for divisor in range(2 , isqrt(lowercase_ ) + 1 ) )
def lowerCAmelCase_( lowercase_ : int = 10**6 ) -> int:
_lowerCamelCase = 0
_lowerCamelCase = 1
_lowerCamelCase = 7
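# Candidates are differences of consecutive cubes: (n + 1)**3 - n**3 = 3*n**2 + 3*n + 1, i.e. 7, 19, 37, 61, ...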
while prime_candidate < max_prime:
primes_count += is_prime(lowercase_ )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
print(F"""{solution() = }""")
| 623 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def lowerCAmelCase_( lowercase_ : str = "laptop" ) -> DataFrame:
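# Scrape the first Amazon.in search results page for the given product and collect
# title, link, price, rating, MRP and discount into the DataFrame.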
_lowerCamelCase = F"""https://www.amazon.in/laptop/s?k={product}"""
_lowerCamelCase = {
'''User-Agent''': '''Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
(KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36''',
'''Accept-Language''': '''en-US, en;q=0.5''',
}
_lowerCamelCase = BeautifulSoup(requests.get(lowercase_ , headers=lowercase_ ).text )
# Initialize a Pandas dataframe with the column titles
_lowerCamelCase = DataFrame(
columns=[
'''Product Title''',
'''Product Link''',
'''Current Price of the product''',
'''Product Rating''',
'''MRP of the product''',
'''Discount''',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'''div''' , attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''} , ) , soup.find_all('''div''' , attrs={'''class''': '''a-row a-size-base a-color-base'''} ) , ):
try:
_lowerCamelCase = item.ha.text
_lowerCamelCase = '''https://www.amazon.in/''' + item.ha.a['''href''']
_lowerCamelCase = item.find('''span''' , attrs={'''class''': '''a-offscreen'''} ).text
try:
_lowerCamelCase = item.find('''span''' , attrs={'''class''': '''a-icon-alt'''} ).text
except AttributeError:
_lowerCamelCase = '''Not available'''
try:
_lowerCamelCase = (
'''₹'''
+ item.find(
'''span''' , attrs={'''class''': '''a-price a-text-price'''} ).text.split('''₹''' )[1]
)
except AttributeError:
_lowerCamelCase = ''''''
try:
_lowerCamelCase = float(
(
(
float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
- float(product_price.strip('''₹''' ).replace(''',''' , '''''' ) )
)
/ float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
)
* 1_00 )
except ValueError:
_lowerCamelCase = float('''nan''' )
except AttributeError:
pass
_lowerCamelCase = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
_lowerCamelCase = ''' '''
_lowerCamelCase = ''' '''
data_frame.index += 1
return data_frame
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[int] = '''headphones'''
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 623 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class lowerCamelCase_:
'''simple docstring'''
lowercase__ : List[Any] = XGLMConfig
lowercase__ : Optional[Any] = {}
lowercase__ : Optional[int] = 'gelu'
def __init__( self , lowerCamelCase__ , lowerCamelCase__=1_4 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=9_9 , lowerCamelCase__=3_2 , lowerCamelCase__=2 , lowerCamelCase__=4 , lowerCamelCase__=3_7 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=5_1_2 , lowerCamelCase__=0.0_2 , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = seq_length
_lowerCamelCase = is_training
_lowerCamelCase = use_input_mask
_lowerCamelCase = use_labels
_lowerCamelCase = vocab_size
_lowerCamelCase = d_model
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = ffn_dim
_lowerCamelCase = activation_function
_lowerCamelCase = activation_dropout
_lowerCamelCase = attention_dropout
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = initializer_range
_lowerCamelCase = None
_lowerCamelCase = 0
_lowerCamelCase = 2
_lowerCamelCase = 1
def snake_case__ ( self ):
return XGLMConfig.from_pretrained('''facebook/xglm-564M''' )
def snake_case__ ( self ):
_lowerCamelCase = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
_lowerCamelCase = None
if self.use_input_mask:
_lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase = self.get_config()
_lowerCamelCase = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def snake_case__ ( self ):
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=lowerCamelCase__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=lowerCamelCase__ , )
def snake_case__ ( self ):
_lowerCamelCase = self.prepare_config_and_inputs()
(
(
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) ,
) = config_and_inputs
_lowerCamelCase = {
'''input_ids''': input_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_tf
class lowerCamelCase_( A__, A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Optional[int] = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
lowercase__ : Tuple = (TFXGLMForCausalLM,) if is_tf_available() else ()
lowercase__ : Union[str, Any] = (
{'feature-extraction': TFXGLMModel, 'text-generation': TFXGLMForCausalLM} if is_tf_available() else {}
)
lowercase__ : int = False
lowercase__ : Dict = False
lowercase__ : str = False
def snake_case__ ( self ):
_lowerCamelCase = TFXGLMModelTester(self )
_lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , n_embd=3_7 )
def snake_case__ ( self ):
self.config_tester.run_common_tests()
@slow
def snake_case__ ( self ):
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase = TFXGLMModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
@unittest.skip(reason='''Currently, model embeddings are going to undergo a major refactor.''' )
def snake_case__ ( self ):
super().test_resize_token_embeddings()
@require_tf
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
@slow
def snake_case__ ( self , lowerCamelCase__=True ):
_lowerCamelCase = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
_lowerCamelCase = tf.convert_to_tensor([[2, 2_6_8, 9_8_6_5]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
_lowerCamelCase = [2, 2_6_8, 9_8_6_5, 6_7, 1_1, 1_9_8_8, 5_7_2_5_2, 9_8_6_5, 5, 9_8_4, 6_7, 1_9_8_8, 2_1_3_8_3_8, 1_6_5_8, 5_3, 7_0_4_4_6, 3_3, 6_6_5_7, 2_7_8, 1_5_8_1]
# fmt: on
_lowerCamelCase = model.generate(lowerCamelCase__ , do_sample=lowerCamelCase__ , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , lowerCamelCase__ )
@slow
def snake_case__ ( self ):
_lowerCamelCase = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
_lowerCamelCase = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
tf.random.set_seed(0 )
_lowerCamelCase = tokenizer('''Today is a nice day and''' , return_tensors='''tf''' )
_lowerCamelCase = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(''':/CPU:0''' ):
_lowerCamelCase = model.generate(lowerCamelCase__ , do_sample=lowerCamelCase__ , seed=[7, 0] )
_lowerCamelCase = tokenizer.decode(output_ids[0] , skip_special_tokens=lowerCamelCase__ )
_lowerCamelCase = (
'''Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'''
)
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
@slow
def snake_case__ ( self ):
_lowerCamelCase = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
_lowerCamelCase = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
_lowerCamelCase = '''left'''
# use different length sentences to test batching
_lowerCamelCase = [
'''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
'''left-padding, such as in batched generation. The output for the sequence below should be the same '''
'''regardless of whether left padding is applied or not. When''',
'''Hello, my dog is a little''',
]
_lowerCamelCase = tokenizer(lowerCamelCase__ , return_tensors='''tf''' , padding=lowerCamelCase__ )
_lowerCamelCase = inputs['''input_ids''']
_lowerCamelCase = model.generate(input_ids=lowerCamelCase__ , attention_mask=inputs['''attention_mask'''] , max_new_tokens=1_2 )
_lowerCamelCase = tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids
_lowerCamelCase = model.generate(input_ids=lowerCamelCase__ , max_new_tokens=1_2 )
_lowerCamelCase = tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids
_lowerCamelCase = model.generate(input_ids=lowerCamelCase__ , max_new_tokens=1_2 )
_lowerCamelCase = tokenizer.batch_decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ )
_lowerCamelCase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCamelCase__ )
_lowerCamelCase = tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCamelCase__ )
_lowerCamelCase = [
'''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
'''left-padding, such as in batched generation. The output for the sequence below should be the same '''
'''regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '''
'''a single''',
'''Hello, my dog is a little bit of a shy one, but he is very friendly''',
]
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , [non_padded_sentence, padded_sentence] )
| 623 |
"""simple docstring"""
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class lowerCamelCase_( A__ ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=1_0_2_4 , lowerCamelCase__=1_0_2_4 , lowerCamelCase__=3.6 ):
_lowerCamelCase = tokenizer
_lowerCamelCase = tokenizer.bos_token_id
_lowerCamelCase = dataset
_lowerCamelCase = seq_length
_lowerCamelCase = seq_length * chars_per_token * num_of_sequences
def __iter__( self ):
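# Greedily fill a character buffer from the streamed dataset, tokenize it in one batch,
# concatenate all token ids with the BOS token as separator, then yield fixed-length slices.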
_lowerCamelCase = iter(self.dataset )
_lowerCamelCase = True
while more_examples:
_lowerCamelCase , _lowerCamelCase = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(lowerCamelCase__ )['''content'''] )
buffer_len += len(buffer[-1] )
except StopIteration:
_lowerCamelCase = False
break
_lowerCamelCase = tokenizer(lowerCamelCase__ , truncation=lowerCamelCase__ )['''input_ids''']
_lowerCamelCase = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0 , len(lowerCamelCase__ ) , self.seq_length ):
_lowerCamelCase = all_token_ids[i : i + self.seq_length]
if len(lowerCamelCase__ ) == self.seq_length:
yield torch.tensor(lowerCamelCase__ )
def lowerCAmelCase_( lowercase_ : Any ) -> Optional[Any]:
_lowerCamelCase = {'''streaming''': True}
_lowerCamelCase = load_dataset(args.dataset_name , split='''train''' , **lowercase_ )
_lowerCamelCase = ConstantLengthDataset(lowercase_ , lowercase_ , seq_length=args.seq_length )
_lowerCamelCase = DataLoader(lowercase_ , batch_size=args.batch_size )
return eval_dataloader
def lowerCAmelCase_( lowercase_ : Tuple ) -> str:
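# Average the loss gathered from all processes and report perplexity as exp(loss), falling back to inf on overflow.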
model.eval()
_lowerCamelCase = []
for step, batch in enumerate(lowercase_ ):
with torch.no_grad():
_lowerCamelCase = model(lowercase_ , labels=lowercase_ )
_lowerCamelCase = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(lowercase_ ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
_lowerCamelCase = torch.mean(torch.cat(lowercase_ ) )
try:
_lowerCamelCase = torch.exp(lowercase_ )
except OverflowError:
_lowerCamelCase = float('''inf''' )
return loss.item(), perplexity.item()
# Setup Accelerator
__SCREAMING_SNAKE_CASE : Dict = Accelerator()
# Parse configuration
__SCREAMING_SNAKE_CASE : Tuple = HfArgumentParser(EvaluationArguments)
__SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
set_seed(args.seed)
# Logging
__SCREAMING_SNAKE_CASE : Optional[Any] = logging.getLogger(__name__)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
# Load model and tokenizer
__SCREAMING_SNAKE_CASE : str = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
__SCREAMING_SNAKE_CASE : Union[str, Any] = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
__SCREAMING_SNAKE_CASE : str = create_dataloader(args)
# Prepare everything with our `accelerator`.
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('''Evaluating and saving model after training''')
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = evaluate(args)
logger.info(F"""loss/eval: {eval_loss}, perplexity: {perplexity}""")
| 623 | 1 |
"""simple docstring"""
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('''>=''', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
__SCREAMING_SNAKE_CASE : List[Any] = get_logger(__name__)
def lowerCAmelCase_( lowercase_ : List[Any] , lowercase_ : List[str] , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : Tuple=0 ) -> Any:
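# Save the model state according to the configured FSDP state-dict type: FULL_STATE_DICT writes one
# consolidated file from rank 0, LOCAL_STATE_DICT one file per rank, and SHARDED_STATE_DICT a
# torch.distributed.checkpoint directory.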
os.makedirs(lowercase_ , exist_ok=lowercase_ )
with FSDP.state_dict_type(
lowercase_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
_lowerCamelCase = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
_lowerCamelCase = F"""{MODEL_NAME}.bin""" if model_index == 0 else F"""{MODEL_NAME}_{model_index}.bin"""
_lowerCamelCase = os.path.join(lowercase_ , lowercase_ )
if accelerator.process_index == 0:
logger.info(F"""Saving model to {output_model_file}""" )
torch.save(lowercase_ , lowercase_ )
logger.info(F"""Model saved to {output_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
_lowerCamelCase = (
F"""{MODEL_NAME}_rank{accelerator.process_index}.bin"""
if model_index == 0
else F"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"""
)
_lowerCamelCase = os.path.join(lowercase_ , lowercase_ )
logger.info(F"""Saving model to {output_model_file}""" )
torch.save(lowercase_ , lowercase_ )
logger.info(F"""Model saved to {output_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
_lowerCamelCase = os.path.join(lowercase_ , F"""{MODEL_NAME}_{model_index}""" )
os.makedirs(lowercase_ , exist_ok=lowercase_ )
logger.info(F"""Saving model to {ckpt_dir}""" )
_lowerCamelCase = {'''model''': state_dict}
dist_cp.save_state_dict(
state_dict=lowercase_ , storage_writer=dist_cp.FileSystemWriter(lowercase_ ) , planner=DefaultSavePlanner() , )
logger.info(F"""Model saved to {ckpt_dir}""" )
def lowerCAmelCase_( lowercase_ : Optional[Any] , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : Any , lowercase_ : Any=0 ) -> str:
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
lowercase_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(lowercase_ ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
'''Set the `sync_module_states` flag to `True` so that model states are synced across processes when '''
'''initializing FSDP object''' )
return
_lowerCamelCase = F"""{MODEL_NAME}.bin""" if model_index == 0 else F"""{MODEL_NAME}_{model_index}.bin"""
_lowerCamelCase = os.path.join(lowercase_ , lowercase_ )
logger.info(F"""Loading model from {input_model_file}""" )
_lowerCamelCase = torch.load(lowercase_ )
logger.info(F"""Model loaded from {input_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
_lowerCamelCase = (
F"""{MODEL_NAME}_rank{accelerator.process_index}.bin"""
if model_index == 0
else F"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"""
)
_lowerCamelCase = os.path.join(lowercase_ , lowercase_ )
logger.info(F"""Loading model from {input_model_file}""" )
_lowerCamelCase = torch.load(lowercase_ )
logger.info(F"""Model loaded from {input_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
_lowerCamelCase = (
os.path.join(lowercase_ , F"""{MODEL_NAME}_{model_index}""" )
if F"""{MODEL_NAME}""" not in input_dir
else input_dir
)
logger.info(F"""Loading model from {ckpt_dir}""" )
_lowerCamelCase = {'''model''': model.state_dict()}
dist_cp.load_state_dict(
state_dict=lowercase_ , storage_reader=dist_cp.FileSystemReader(lowercase_ ) , planner=DefaultLoadPlanner() , )
_lowerCamelCase = state_dict['''model''']
logger.info(F"""Model loaded from {ckpt_dir}""" )
model.load_state_dict(lowercase_ )
def lowerCAmelCase_( lowercase_ : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : Dict=0 ) -> Tuple:
os.makedirs(lowercase_ , exist_ok=lowercase_ )
with FSDP.state_dict_type(
lowercase_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
_lowerCamelCase = FSDP.optim_state_dict(lowercase_ , lowercase_ )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
_lowerCamelCase = (
F"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else F"""{OPTIMIZER_NAME}_{optimizer_index}.bin"""
)
_lowerCamelCase = os.path.join(lowercase_ , lowercase_ )
logger.info(F"""Saving Optimizer state to {output_optimizer_file}""" )
torch.save(lowercase_ , lowercase_ )
logger.info(F"""Optimizer state saved in {output_optimizer_file}""" )
else:
_lowerCamelCase = os.path.join(lowercase_ , F"""{OPTIMIZER_NAME}_{optimizer_index}""" )
os.makedirs(lowercase_ , exist_ok=lowercase_ )
logger.info(F"""Saving Optimizer state to {ckpt_dir}""" )
dist_cp.save_state_dict(
state_dict={'''optimizer''': optim_state} , storage_writer=dist_cp.FileSystemWriter(lowercase_ ) , planner=DefaultSavePlanner() , )
logger.info(F"""Optimizer state saved in {ckpt_dir}""" )
def lowerCAmelCase_( lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : Any , lowercase_ : Union[str, Any]=0 ) -> Tuple:
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
lowercase_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
_lowerCamelCase = None
# below check should work but currently it isn't working (mostly a pytorch issue),
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
_lowerCamelCase = (
F"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else F"""{OPTIMIZER_NAME}_{optimizer_index}.bin"""
)
_lowerCamelCase = os.path.join(lowercase_ , lowercase_ )
logger.info(F"""Loading Optimizer state from {input_optimizer_file}""" )
_lowerCamelCase = torch.load(lowercase_ )
logger.info(F"""Optimizer state loaded from {input_optimizer_file}""" )
else:
_lowerCamelCase = (
os.path.join(lowercase_ , F"""{OPTIMIZER_NAME}_{optimizer_index}""" )
if F"""{OPTIMIZER_NAME}""" not in input_dir
else input_dir
)
logger.info(F"""Loading Optimizer from {ckpt_dir}""" )
_lowerCamelCase = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key='''optimizer''' , storage_reader=dist_cp.FileSystemReader(lowercase_ ) , )
_lowerCamelCase = optim_state['''optimizer''']
logger.info(F"""Optimizer loaded from {ckpt_dir}""" )
_lowerCamelCase = FSDP.optim_state_dict_to_load(lowercase_ , lowercase_ , lowercase_ )
optimizer.load_state_dict(lowercase_ )
| 623 |
"""simple docstring"""
import numpy as np
def lowerCAmelCase_( lowercase_ : np.ndarray , lowercase_ : np.ndarray , lowercase_ : float = 1e-12 , lowercase_ : int = 1_00 , ) -> tuple[float, np.ndarray]:
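# Power iteration: repeatedly multiply a vector by the matrix and renormalize; the vector converges
# to the dominant eigenvector and the Rayleigh quotient to the largest-magnitude eigenvalue.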
assert np.shape(lowercase_ )[0] == np.shape(lowercase_ )[1]
# Ensure proper dimensionality.
assert np.shape(lowercase_ )[0] == np.shape(lowercase_ )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(lowercase_ ) == np.iscomplexobj(lowercase_ )
_lowerCamelCase = np.iscomplexobj(lowercase_ )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(lowercase_ , input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
_lowerCamelCase = False
_lowerCamelCase = 0
_lowerCamelCase = 0
_lowerCamelCase = 1e12
while not convergence:
# Multiple matrix by the vector.
_lowerCamelCase = np.dot(lowercase_ , lowercase_ )
# Normalize the resulting output vector.
_lowerCamelCase = w / np.linalg.norm(lowercase_ )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
_lowerCamelCase = vector.conj().T if is_complex else vector.T
_lowerCamelCase = np.dot(lowercase_ , np.dot(lowercase_ , lowercase_ ) )
# Check convergence.
_lowerCamelCase = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
_lowerCamelCase = True
_lowerCamelCase = lambda_
if is_complex:
_lowerCamelCase = np.real(lambda_ )
return lambda_, vector
def lowerCAmelCase_( ) -> None:
_lowerCamelCase = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
_lowerCamelCase = np.array([41, 4, 20] )
_lowerCamelCase = real_input_matrix.astype(np.complexaaa )
_lowerCamelCase = np.triu(1j * complex_input_matrix , 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
_lowerCamelCase = np.array([41, 4, 20] ).astype(np.complexaaa )
for problem_type in ["real", "complex"]:
if problem_type == "real":
_lowerCamelCase = real_input_matrix
_lowerCamelCase = real_vector
elif problem_type == "complex":
_lowerCamelCase = complex_input_matrix
_lowerCamelCase = complex_vector
# Our implementation.
_lowerCamelCase , _lowerCamelCase = power_iteration(lowercase_ , lowercase_ )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
# eigh (eigh used for symmetric or hermetian matrices).
_lowerCamelCase , _lowerCamelCase = np.linalg.eigh(lowercase_ )
# Last eigenvalue is the maximum one.
_lowerCamelCase = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
_lowerCamelCase = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1e-6
# Take absolute values element wise of each eigenvector.
# as they are only unique to a minus sign.
assert np.linalg.norm(np.abs(lowercase_ ) - np.abs(lowercase_ ) ) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 623 | 1 |
"""simple docstring"""
def lowerCAmelCase_( lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : Any , lowercase_ : int ) -> List[str]:
global f # a global dp table for knapsack
if f[i][j] < 0:
if j < wt[i - 1]:
_lowerCamelCase = mf_knapsack(i - 1 , lowercase_ , lowercase_ , lowercase_ )
else:
_lowerCamelCase = max(
mf_knapsack(i - 1 , lowercase_ , lowercase_ , lowercase_ ) , mf_knapsack(i - 1 , lowercase_ , lowercase_ , j - wt[i - 1] ) + val[i - 1] , )
_lowerCamelCase = val
return f[i][j]
def lowerCAmelCase_( lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : Dict ) -> Optional[int]:
_lowerCamelCase = [[0] * (w + 1) for _ in range(n + 1 )]
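# dp[i][w_] holds the best value achievable using the first i items within capacity w_.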
for i in range(1 , n + 1 ):
for w_ in range(1 , w + 1 ):
if wt[i - 1] <= w_:
_lowerCamelCase = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] )
else:
_lowerCamelCase = dp[i - 1][w_]
return dp[n][w_], dp
def lowerCAmelCase_( lowercase_ : int , lowercase_ : list , lowercase_ : list ) -> Tuple:
if not (isinstance(lowercase_ , (list, tuple) ) and isinstance(lowercase_ , (list, tuple) )):
raise ValueError(
'''Both the weights and values vectors must be either lists or tuples''' )
_lowerCamelCase = len(lowercase_ )
if num_items != len(lowercase_ ):
_lowerCamelCase = (
'''The number of weights must be the same as the number of values.\n'''
F"""But got {num_items} weights and {len(lowercase_ )} values"""
)
raise ValueError(lowercase_ )
for i in range(lowercase_ ):
if not isinstance(wt[i] , lowercase_ ):
_lowerCamelCase = (
'''All weights must be integers but got weight of '''
F"""type {type(wt[i] )} at index {i}"""
)
raise TypeError(lowercase_ )
_lowerCamelCase , _lowerCamelCase = knapsack(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
_lowerCamelCase = set()
_construct_solution(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
return optimal_val, example_optional_set
def lowerCAmelCase_( lowercase_ : list , lowercase_ : list , lowercase_ : int , lowercase_ : int , lowercase_ : set ) -> Optional[Any]:
# for the current item i at a maximum weight j to be part of an optimal subset,
# the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
# where i - 1 means considering only the previous items at the given maximum weight
if i > 0 and j > 0:
if dp[i - 1][j] == dp[i][j]:
_construct_solution(lowercase_ , lowercase_ , i - 1 , lowercase_ , lowercase_ )
else:
optimal_set.add(lowercase_ )
_construct_solution(lowercase_ , lowercase_ , i - 1 , j - wt[i - 1] , lowercase_ )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[Any] = [3, 2, 4, 4]
__SCREAMING_SNAKE_CASE : Optional[int] = [4, 3, 2, 3]
__SCREAMING_SNAKE_CASE : Optional[Any] = 4
__SCREAMING_SNAKE_CASE : List[Any] = 6
__SCREAMING_SNAKE_CASE : List[Any] = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : int = knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : int = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print('''optimal_value = ''', optimal_solution)
print('''An optimal subset corresponding to the optimal value''', optimal_subset)
| 623 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''configuration_speecht5''': [
'''SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP''',
'''SpeechT5Config''',
'''SpeechT5HifiGanConfig''',
],
'''feature_extraction_speecht5''': ['''SpeechT5FeatureExtractor'''],
'''processing_speecht5''': ['''SpeechT5Processor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Tuple = ['''SpeechT5Tokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Any = [
'''SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SpeechT5ForSpeechToText''',
'''SpeechT5ForSpeechToSpeech''',
'''SpeechT5ForTextToSpeech''',
'''SpeechT5Model''',
'''SpeechT5PreTrainedModel''',
'''SpeechT5HifiGan''',
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 623 | 1 |
"""simple docstring"""
def lowerCAmelCase_( lowercase_ : int , lowercase_ : int ) -> str:
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
_lowerCamelCase = str(bin(lowercase_ ) )[2:] # remove the leading "0b"
_lowerCamelCase = str(bin(lowercase_ ) )[2:] # remove the leading "0b"
_lowerCamelCase = max(len(lowercase_ ) , len(lowercase_ ) )
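# Zero-pad the shorter binary string so both operands align bitwise before AND-ing digit by digit.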
return "0b" + "".join(
str(int(char_a == '''1''' and char_b == '''1''' ) )
for char_a, char_b in zip(a_binary.zfill(lowercase_ ) , b_binary.zfill(lowercase_ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 623 |
"""simple docstring"""
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
__SCREAMING_SNAKE_CASE : List[str] = numpy.array([0, 0])
__SCREAMING_SNAKE_CASE : Optional[Any] = numpy.array([0.5, 0.866_0254])
__SCREAMING_SNAKE_CASE : Tuple = numpy.array([1, 0])
__SCREAMING_SNAKE_CASE : List[Any] = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def lowerCAmelCase_( lowercase_ : list[numpy.ndarray] , lowercase_ : int ) -> list[numpy.ndarray]:
_lowerCamelCase = initial_vectors
for _ in range(lowercase_ ):
_lowerCamelCase = iteration_step(lowercase_ )
return vectors
def lowerCAmelCase_( lowercase_ : list[numpy.ndarray] ) -> list[numpy.ndarray]:
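# One Koch iteration: every segment keeps its endpoints but gains three interior points, the middle
# third being replaced by two sides of an equilateral spike (a 60-degree rotation of the third-length step).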
_lowerCamelCase = []
for i, start_vector in enumerate(vectors[:-1] ):
_lowerCamelCase = vectors[i + 1]
new_vectors.append(lowercase_ )
_lowerCamelCase = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def lowerCAmelCase_( lowercase_ : numpy.ndarray , lowercase_ : float ) -> numpy.ndarray:
_lowerCamelCase = numpy.radians(lowercase_ )
_lowerCamelCase , _lowerCamelCase = numpy.cos(lowercase_ ), numpy.sin(lowercase_ )
_lowerCamelCase = numpy.array(((c, -s), (s, c)) )
return numpy.dot(lowercase_ , lowercase_ )
def lowerCAmelCase_( lowercase_ : list[numpy.ndarray] ) -> None:
_lowerCamelCase = plt.gca()
axes.set_aspect('''equal''' )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
_lowerCamelCase , _lowerCamelCase = zip(*lowercase_ )
plt.plot(lowercase_ , lowercase_ )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
__SCREAMING_SNAKE_CASE : str = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 623 | 1 |
"""simple docstring"""
from __future__ import annotations
import time
__SCREAMING_SNAKE_CASE : Optional[Any] = list[tuple[int, int]]
__SCREAMING_SNAKE_CASE : int = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__SCREAMING_SNAKE_CASE : Optional[int] = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = pos_x
_lowerCamelCase = pos_y
_lowerCamelCase = (pos_y, pos_x)
_lowerCamelCase = goal_x
_lowerCamelCase = goal_y
_lowerCamelCase = parent
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = Node(start[1] , start[0] , goal[1] , goal[0] , lowerCamelCase__ )
_lowerCamelCase = Node(goal[1] , goal[0] , goal[1] , goal[0] , lowerCamelCase__ )
_lowerCamelCase = [self.start]
_lowerCamelCase = False
def snake_case__ ( self ):
while self.node_queue:
_lowerCamelCase = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
_lowerCamelCase = True
return self.retrace_path(lowerCamelCase__ )
_lowerCamelCase = self.get_successors(lowerCamelCase__ )
for node in successors:
self.node_queue.append(lowerCamelCase__ )
if not self.reached:
return [self.start.pos]
return None
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = []
for action in delta:
_lowerCamelCase = parent.pos_x + action[1]
_lowerCamelCase = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowerCamelCase__ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(lowerCamelCase__ , lowerCamelCase__ , self.target.pos_y , self.target.pos_x , lowerCamelCase__ ) )
return successors
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = node
_lowerCamelCase = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
_lowerCamelCase = current_node.parent
path.reverse()
return path
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = BreadthFirstSearch(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = BreadthFirstSearch(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = False
def snake_case__ ( self ):
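# Advance the forward and backward frontiers one node per iteration; the search terminates
# when the two popped nodes share a position.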
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
_lowerCamelCase = self.fwd_bfs.node_queue.pop(0 )
_lowerCamelCase = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
_lowerCamelCase = True
return self.retrace_bidirectional_path(
lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = current_bwd_node
_lowerCamelCase = current_fwd_node
_lowerCamelCase = {
self.fwd_bfs: self.fwd_bfs.get_successors(lowerCamelCase__ ),
self.bwd_bfs: self.bwd_bfs.get_successors(lowerCamelCase__ ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(lowerCamelCase__ )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = self.fwd_bfs.retrace_path(lowerCamelCase__ )
_lowerCamelCase = self.bwd_bfs.retrace_path(lowerCamelCase__ )
bwd_path.pop()
bwd_path.reverse()
_lowerCamelCase = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
__SCREAMING_SNAKE_CASE : List[Any] = (0, 0)
__SCREAMING_SNAKE_CASE : Tuple = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__SCREAMING_SNAKE_CASE : str = time.time()
__SCREAMING_SNAKE_CASE : Optional[Any] = BreadthFirstSearch(init, goal)
__SCREAMING_SNAKE_CASE : Tuple = bfs.search()
__SCREAMING_SNAKE_CASE : Union[str, Any] = time.time() - start_bfs_time
print('''Unidirectional BFS computation time : ''', bfs_time)
__SCREAMING_SNAKE_CASE : Optional[Any] = time.time()
__SCREAMING_SNAKE_CASE : Optional[int] = BidirectionalBreadthFirstSearch(init, goal)
__SCREAMING_SNAKE_CASE : Optional[int] = bd_bfs.search()
__SCREAMING_SNAKE_CASE : Union[str, Any] = time.time() - start_bd_bfs_time
print('''Bidirectional BFS computation time : ''', bd_bfs_time)
| 623 |
"""simple docstring"""
from typing import Any
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ ):
_lowerCamelCase = data
_lowerCamelCase = None
class lowerCamelCase_:
'''simple docstring'''
def __init__( self ):
_lowerCamelCase = None
def snake_case__ ( self ):
_lowerCamelCase = self.head
while temp is not None:
print(temp.data , end=''' ''' )
_lowerCamelCase = temp.next
print()
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = Node(lowerCamelCase__ )
_lowerCamelCase = self.head
_lowerCamelCase = new_node
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ):
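# Swap the two nodes by exchanging their data payloads rather than relinking pointers.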
if node_data_a == node_data_a:
return
else:
_lowerCamelCase = self.head
while node_a is not None and node_a.data != node_data_a:
_lowerCamelCase = node_a.next
_lowerCamelCase = self.head
while node_a is not None and node_a.data != node_data_a:
_lowerCamelCase = node_a.next
if node_a is None or node_a is None:
return
_lowerCamelCase , _lowerCamelCase = node_a.data, node_a.data
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[Any] = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('''After swapping''')
ll.print_list()
| 623 | 1 |
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
__SCREAMING_SNAKE_CASE : Optional[Any] = '''\
Text data.
Second line of data.'''
__SCREAMING_SNAKE_CASE : str = '''file'''
@pytest.fixture(scope='''session''' )
def lowerCAmelCase_( lowercase_ : List[str] ) -> Optional[int]:
_lowerCamelCase = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''')
_lowerCamelCase = bytes(lowercase_ , '''utf-8''' )
with zstd.open(lowercase_ , '''wb''' ) as f:
f.write(lowercase_ )
return path
@pytest.fixture
def lowerCAmelCase_( lowercase_ : str ) -> Optional[int]:
with open(os.path.join(tmpfs.local_root_dir , lowercase_ ) , '''w''' ) as f:
f.write(lowercase_ )
return FILE_PATH
@pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] )
def lowerCAmelCase_( lowercase_ : Optional[int] , lowercase_ : Dict , lowercase_ : Optional[Any] , lowercase_ : List[Any] , lowercase_ : Optional[Any] , lowercase_ : str ) -> Any:
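# Round-trip check: the extracted content of each gzip/xz/zstd archive must equal the original text.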
_lowerCamelCase = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path}
_lowerCamelCase = input_paths[compression_format]
_lowerCamelCase = tmp_path / '''cache'''
_lowerCamelCase = DownloadConfig(cache_dir=lowercase_ , extract_compressed_file=lowercase_ )
_lowerCamelCase = cached_path(lowercase_ , download_config=lowercase_ )
with open(lowercase_ ) as f:
_lowerCamelCase = f.read()
with open(lowercase_ ) as f:
_lowerCamelCase = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('''default_extracted''' , [True, False] )
@pytest.mark.parametrize('''default_cache_dir''' , [True, False] )
def lowerCAmelCase_( lowercase_ : Optional[int] , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : Optional[Any] ) -> Optional[int]:
_lowerCamelCase = '''custom_cache'''
_lowerCamelCase = '''custom_extracted_dir'''
_lowerCamelCase = tmp_path / '''custom_extracted_path'''
if default_extracted:
_lowerCamelCase = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''')
else:
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , lowercase_ )
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(lowercase_ ) )
_lowerCamelCase = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
_lowerCamelCase = xz_file
_lowerCamelCase = (
DownloadConfig(extract_compressed_file=lowercase_ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=lowercase_ )
)
_lowerCamelCase = cached_path(lowercase_ , download_config=lowercase_ )
assert Path(lowercase_ ).parent.parts[-2:] == expected
def lowerCAmelCase_( lowercase_ : Any ) -> Optional[int]:
# absolute path
_lowerCamelCase = str(Path(lowercase_ ).resolve() )
assert cached_path(lowercase_ ) == text_file
# relative path
_lowerCamelCase = str(Path(lowercase_ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(lowercase_ ) == text_file
def lowerCAmelCase_( lowercase_ : str ) -> List[str]:
# absolute path
_lowerCamelCase = str(tmp_path.resolve() / '''__missing_file__.txt''' )
with pytest.raises(lowercase_ ):
cached_path(lowercase_ )
# relative path
_lowerCamelCase = '''./__missing_file__.txt'''
with pytest.raises(lowercase_ ):
cached_path(lowercase_ )
def lowerCAmelCase_( lowercase_ : List[Any] ) -> Tuple:
_lowerCamelCase = get_from_cache(F"""tmp://{tmpfs_file}""" )
with open(lowercase_ ) as f:
_lowerCamelCase = f.read()
assert output_file_content == FILE_CONTENT
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , lowercase_ )
def lowerCAmelCase_( ) -> Optional[Any]:
with pytest.raises(lowercase_ ):
cached_path('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , lowercase_ )
def lowerCAmelCase_( lowercase_ : Optional[int] ) -> int:
_lowerCamelCase = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(lowercase_ ):
http_get('''https://huggingface.co''' , temp_file=lowercase_ )
with pytest.raises(lowercase_ ):
http_head('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , lowercase_ )
def lowerCAmelCase_( lowercase_ : int ) -> Optional[int]:
_lowerCamelCase = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(lowercase_ ):
ftp_get('''ftp://huggingface.co''' , temp_file=lowercase_ )
with pytest.raises(lowercase_ ):
ftp_head('''ftp://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , lowercase_ )
def lowerCAmelCase_( lowercase_ : Optional[Any] ) -> Any:
_lowerCamelCase = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(lowercase_ ):
fsspec_get('''s3://huggingface.co''' , temp_file=lowercase_ )
with pytest.raises(lowercase_ ):
fsspec_head('''s3://huggingface.co''' )
| 623 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__SCREAMING_SNAKE_CASE : Optional[Any] = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def lowerCAmelCase_( lowercase_ : List[Any] ) -> Optional[Any]:
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(lowercase_ )
def lowerCAmelCase_( lowercase_ : List[str] ) -> List[str]:
from diffusers.utils.testing_utils import pytest_terminal_summary_main
_lowerCamelCase = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(lowercase_ , id=lowercase_ )
| 623 | 1 |