code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
"""Lazy-import structure for the FocalNet model (standard Transformers __init__ layout)."""
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Map of submodule name -> public names it exports; consumed by _LazyModule below.
# The original bound this dict to a throwaway name while line 40 reads `_import_structure`.
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is installed: also expose the modeling classes through the lazy module.
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers get the real imports.
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy so heavy submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 414 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Shared logger for this configuration module.
lowerCAmelCase_ : List[str] = logging.get_logger(__name__)

# Map from pretrained checkpoint name to its hosted config.json URL.
# NOTE(review): this rebinding clobbers the logger bound just above — both
# assignments share one obfuscated name; presumably the map was originally
# LILT_PRETRAINED_CONFIG_ARCHIVE_MAP — confirm against callers.
lowerCAmelCase_ : int = {
    '''SCUT-DLVCLab/lilt-roberta-en-base''': (
        '''https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'''
    ),
}
class LiltConfig(PretrainedConfig):
    """Configuration class for the LiLT model.

    Holds RoBERTa-style transformer hyper-parameters plus the layout-specific
    ``channel_shrink_ratio`` and ``max_2d_position_embeddings`` options.

    The original block declared every ``__init__`` parameter with the same
    obfuscated name (a SyntaxError); the real names are recovered from the
    attribute assignments in the body.
    """

    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        # pad_token_id is forwarded so the base config registers the padding token.
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
| 414 | 1 |
# Colour codes for Dijkstra's Dutch-national-flag three-way partition.
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    """Sort, in place and in one pass, a sequence containing only 0, 1 and 2.

    Returns the (same) sorted sequence; a new empty/one-element list is
    returned for the trivial cases.

    Raises:
        ValueError: if the sequence contains a value other than 0, 1 or 2.

    >>> dutch_national_flag_sort([2, 1, 0, 0, 1, 2])
    [0, 0, 1, 1, 2, 2]
    """
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)

    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            # Red goes to the front; both pointers advance.
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            # Blue goes to the back; the swapped-in element is re-examined.
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contains only {colors} values"
            raise ValueError(msg)
    return sequence
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Read a comma-separated list of 0/1/2 values from stdin and sort it.
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
| 709 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")

# Module-level logger; handlers and level are configured inside main() via logging.basicConfig.
# NOTE(review): later code reads the name `logger` (e.g. logger.setLevel), but this
# obfuscated binding leaves `logger` undefined — it should be named `logger`.
__snake_case : Any = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.

    The original block assigned every field to the same obfuscated attribute name,
    so only one field survived; names are restored from their usage sites
    (e.g. ``model_args.model_name_or_path``, ``model_args.use_auth_token``).
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval.

    Field names restored from their usage sites (``data_args.train_file``,
    ``data_args.max_seq_length``, …); the mangled ``UpperCamelCase__`` method
    was the dataclass ``__post_init__`` validation hook.
    """

    train_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        """Validate that any provided data files are csv or json."""
        if self.train_file is not None:
            extension = self.train_file.split('.')[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split('.')[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """Data collator that dynamically pads multiple-choice inputs.

    Flattens the per-example choice dimension, lets the tokenizer pad the
    flattened features, then reshapes back to (batch, num_choices, seq_len)
    and re-attaches the labels.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        # One dict per (example, choice) pair so the tokenizer can pad them uniformly.
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors='pt',
        )

        # Un-flatten back to (batch, num_choices, seq_len).
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels. The original used `torch.intaa`, which does not exist;
        # classification labels must be int64.
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
def main():
    """Fine-tune a multiple-choice model on SWAG (or user-provided CSV/JSON files).

    Parses model/data/training arguments, configures logging, loads and
    tokenizes the dataset, then trains/evaluates with the HF Trainer.
    All local names are restored from the call sites the obfuscation left intact
    (e.g. `parser.parse_json_file`, `raw_datasets`, `train_dataset.map`).
    """
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_swag', model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}")
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                'Use --overwrite_output_dir to overcome.')
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split('.')[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            'swag',
            'regular',
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer. The .from_pretrained methods guarantee that
    # only one local process can concurrently download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool('.ckpt' in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = 'sent1'
    question_header_name = 'sent2'

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'
                ' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'
                ' override this default with `--block_size xxx`.')
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.")
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    # Preprocessing the datasets.
    def preprocess_function(examples):
        # Each context is repeated once per candidate ending.
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]
        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))
        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding='max_length' if data_args.pad_to_max_length else False,
        )
        # Un-flatten: regroup consecutive runs of 4 into one multiple-choice example.
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}

    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError('--do_train requires a train dataset')
        train_dataset = raw_datasets['train']
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc='train dataset map pre-processing'):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError('--do_eval requires a validation dataset')
        eval_dataset = raw_datasets['validation']
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc='validation dataset map pre-processing'):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.log_metrics('train', metrics)
        trainer.save_metrics('train', metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)

    kwargs = {
        'finetuned_from': model_args.model_name_or_path,
        'tasks': 'multiple-choice',
        'dataset_tags': 'swag',
        'dataset_args': 'regular',
        'dataset': 'SWAG',
        'language': 'en',
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    """Entry point for TPU multiprocessing (xla_spawn); `index` is unused by design."""
    main()


if __name__ == "__main__":
    main()
| 365 | 0 |
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
# Module-level logger; bound here but not referenced in the visible snippet.
__lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
class PerceiverFeatureExtractor(PerceiverImageProcessor):
    """Deprecated alias of PerceiverImageProcessor; warns on instantiation.

    The base class is the PerceiverImageProcessor imported above (the obfuscated
    source inherited from an undefined `_A`).
    """

    def __init__(self, *args, **kwargs) -> None:
        # The original passed the *args tuple as the warning category, which raises
        # a TypeError at runtime; the deprecation must be emitted as a FutureWarning.
        warnings.warn(
            """The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use PerceiverImageProcessor instead.""",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 529 |
"""simple docstring"""
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
# Batch-size style constants.
# NOTE(review): both bindings share one obfuscated name, so 16 is immediately
# overwritten by 32, and neither is referenced in the visible code — presumably
# these were distinct train/eval batch-size constants; confirm against the
# original example script.
_lowerCAmelCase :List[str] = 16
_lowerCAmelCase :Any = 32
def bamb(x: int) -> int:
    """Convert a byte count to whole mebibytes (floor). Name restored from call sites."""
    # Original body read an undefined `x` while the parameter had a garbage name.
    return int(x / 2**20)
class TorchTracemalloc:
    """Context manager that records CUDA memory used inside the block.

    Exposes `begin`, `end`, `peak` (raw byte counts) and `used`, `peaked`
    (MiB deltas) — the attribute names read by the training loop. The original
    stored every value into a garbage local instead of `self`.
    """

    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin)
        self.peaked = bamb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(
    accelerator: Accelerator,
    batch_size: int = 16,
    model_name: str = "bert-base-cased",
    n_train: int = 320,
    n_val: int = 160,
):
    """Build train/eval DataLoaders over truncated GLUE/MRPC splits.

    Name and parameters restored from the call site in the training function.
    Returns (train_dataloader, eval_dataloader).
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        '''glue''', '''mrpc''', split={'''train''': f'train[:{n_train}]', '''validation''': f'validation[:{n_val}]'}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=['''idx''', '''sentence1''', '''sentence2'''],
        load_from_cache_file=False,
    )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''', '''labels''')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='''max_length''', max_length=128, return_tensors='''pt''')
        return tokenizer.pad(examples, padding='''longest''', return_tensors='''pt''')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader
def training_function(config, args):
    """Train an MRPC classifier while tracking peak GPU memory per epoch.

    Local names restored from the usage sites the obfuscation left intact
    (`model.train()`, `optimizer.step()`, `tracemalloc.peaked`, …).
    """
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''])
    seed = int(config['''seed'''])
    batch_size = int(config['''batch_size'''])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer: a dummy stand-in when DeepSpeed supplies its own.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            '''gradient_accumulation_steps'''
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything: no specific order to remember, just unpack in the same
    # order the objects were passed to the prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print('''Memory before entering the train : {}'''.format(bamb(tracemalloc.begin)))
        accelerator.print('''Memory consumed at the end of the train (end-begin): {}'''.format(tracemalloc.used))
        accelerator.print('''Peak Memory consumed during the train (max-begin): {}'''.format(tracemalloc.peaked))
        accelerator.print(
            '''Total Peak Memory consumed during the train (max): {}'''.format(
                tracemalloc.peaked + bamb(tracemalloc.begin)))
        train_total_peak_memory[f'epoch-{epoch}'] = tracemalloc.peaked + bamb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f'epoch-{epoch}'] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, '''peak_memory_utilization.json'''), '''w''') as f:
            json.dump(train_total_peak_memory, f)
def main():
    """Parse CLI arguments and launch the peak-memory tracking training run."""
    parser = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''')
    parser.add_argument(
        '''--model_name_or_path''',
        type=str,
        default='''bert-base-cased''',
        help='''Path to pretrained model or model identifier from huggingface.co/models.''',
        required=False,
    )
    parser.add_argument(
        '''--output_dir''',
        type=str,
        default='''.''',
        help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''',
    )
    parser.add_argument(
        '''--peak_memory_upper_bound''',
        type=float,
        default=None,
        help='''The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.''',
    )
    parser.add_argument(
        '''--n_train''',
        type=int,
        default=320,
        help='''Number of training examples to use.''',
    )
    parser.add_argument(
        '''--n_val''',
        type=int,
        default=160,
        help='''Number of validation examples to use.''',
    )
    parser.add_argument(
        '''--num_epochs''',
        type=int,
        default=1,
        help='''Number of train epochs.''',
    )
    args = parser.parse_args()
    config = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 506 | 0 |
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1):
    """Drop `n_shave_prefix_segments` dot-separated segments from the start of `path`.

    A negative count drops that many segments from the end instead.
    Name restored from the call sites in the path-renaming helpers below.
    """
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split('.')[n_shave_prefix_segments:])
    else:
        return ".".join(path.split('.')[:n_shave_prefix_segments])
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """Map checkpoint resnet-layer key names to their diffusers equivalents.

    Returns a list of {'old': original_key, 'new': renamed_key} dicts.
    The obfuscated original assigned every intermediate to a garbage name and
    called an undefined helper; the replace chain itself is preserved verbatim.
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace('in_layers.0', 'norm1')
        new_item = new_item.replace('in_layers.2', 'conv1')
        new_item = new_item.replace('out_layers.0', 'norm2')
        new_item = new_item.replace('out_layers.3', 'conv2')
        new_item = new_item.replace('emb_layers.1', 'time_emb_proj')
        new_item = new_item.replace('skip_connection', 'conv_shortcut')
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({'old': old_item, 'new': new_item})
    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """Map checkpoint attention-layer key names to their diffusers equivalents.

    Returns a list of {'old': original_key, 'new': renamed_key} dicts.
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace('norm.weight', 'group_norm.weight')
        new_item = new_item.replace('norm.bias', 'group_norm.bias')
        new_item = new_item.replace('proj_out.weight', 'proj_attn.weight')
        new_item = new_item.replace('proj_out.bias', 'proj_attn.bias')
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({'old': old_item, 'new': new_item})
    return mapping
def assign_to_checkpoint(
    paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None
):
    """Copy weights from ``old_checkpoint`` into ``checkpoint`` under renamed keys.

    Args:
        paths: list of ``{"old": ..., "new": ...}`` rename mappings.
        checkpoint: destination state dict (mutated in place).
        old_checkpoint: source state dict.
        attention_paths_to_split: optional mapping from a fused ``qkv`` weight
            path to the three destination paths for query/key/value; such
            tensors are split head-wise instead of copied verbatim.
        additional_replacements: optional extra ``{"old": ..., "new": ...}``
            substring replacements applied to every new path.
        config: model config dict; only ``config["num_head_channels"]`` is
            read, and only when ``attention_paths_to_split`` is given.
    """
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3

            # 3-d tensors (conv-style qkv weights) keep a trailing channel axis.
            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)

            num_heads = old_tensor.shape[0] // config['num_head_channels'] // 3

            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            checkpoint[path_map['query']] = query.reshape(target_shape)
            checkpoint[path_map['key']] = key.reshape(target_shape)
            checkpoint[path_map['value']] = value.reshape(target_shape)

    for path in paths:
        new_path = path['new']

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace('middle_block.0', 'mid_block.resnets.0')
        new_path = new_path.replace('middle_block.1', 'mid_block.attentions.0')
        new_path = new_path.replace('middle_block.2', 'mid_block.resnets.1')

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement['old'], replacement['new'])

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path['old']][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path['old']]
def A ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
    """Convert an original CompVis LDM UNet ``checkpoint`` to the diffusers key layout.

    NOTE(review): this function is machine-mangled. Both parameters share the
    name ``SCREAMING_SNAKE_CASE`` (a SyntaxError in Python), and nearly every
    assignment target has been rewritten to ``UpperCAmelCase__``, losing the
    intended variable names (``new_checkpoint``, ``block_id``,
    ``layer_in_block_id``, ...) as well as the ``new_checkpoint[...] = ...``
    stores. Later lines still reference the intended names (``checkpoint``,
    ``config``, ``input_blocks``, ``meta_path``, ``new_checkpoint`` ...),
    which are therefore undefined here. The call site below
    (``convert_ldm_checkpoint(checkpoint, config)``) indicates the intended
    signature — TODO restore from the upstream diffusers conversion script.
    """
    # Destination state dict (intended: ``new_checkpoint``).
    UpperCAmelCase__ :Dict = {}
    # Time-embedding MLP weights (presumably mapped to time_embedding.linear_1/2 — targets lost).
    UpperCAmelCase__ :Optional[int] = checkpoint['time_embed.0.weight']
    UpperCAmelCase__ :List[str] = checkpoint['time_embed.0.bias']
    UpperCAmelCase__ :List[str] = checkpoint['time_embed.2.weight']
    UpperCAmelCase__ :Optional[Any] = checkpoint['time_embed.2.bias']
    # Input convolution.
    UpperCAmelCase__ :List[str] = checkpoint['input_blocks.0.0.weight']
    UpperCAmelCase__ :List[str] = checkpoint['input_blocks.0.0.bias']
    # Output norm + output convolution.
    UpperCAmelCase__ :Optional[Any] = checkpoint['out.0.weight']
    UpperCAmelCase__ :str = checkpoint['out.0.bias']
    UpperCAmelCase__ :Tuple = checkpoint['out.2.weight']
    UpperCAmelCase__ :int = checkpoint['out.2.bias']
    # Retrieves the keys for the input blocks only
    UpperCAmelCase__ :List[Any] = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} )
    UpperCAmelCase__ :Optional[Any] = {
        layer_id: [key for key in checkpoint if f"""input_blocks.{layer_id}""" in key]
        for layer_id in range(SCREAMING_SNAKE_CASE )
    }
    # Retrieves the keys for the middle blocks only
    UpperCAmelCase__ :Union[str, Any] = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} )
    UpperCAmelCase__ :Optional[Any] = {
        layer_id: [key for key in checkpoint if f"""middle_block.{layer_id}""" in key]
        for layer_id in range(SCREAMING_SNAKE_CASE )
    }
    # Retrieves the keys for the output blocks only
    UpperCAmelCase__ :List[str] = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'output_blocks' in layer} )
    UpperCAmelCase__ :Optional[Any] = {
        layer_id: [key for key in checkpoint if f"""output_blocks.{layer_id}""" in key]
        for layer_id in range(SCREAMING_SNAKE_CASE )
    }
    # Down (input) blocks: block 0 is the input conv handled above, hence range(1, ...).
    for i in range(1 , SCREAMING_SNAKE_CASE ):
        # Intended: block_id / layer_in_block_id derived from num_res_blocks.
        UpperCAmelCase__ :Union[str, Any] = (i - 1) // (config['num_res_blocks'] + 1)
        UpperCAmelCase__ :List[Any] = (i - 1) % (config['num_res_blocks'] + 1)
        UpperCAmelCase__ :Union[str, Any] = [key for key in input_blocks[i] if f"""input_blocks.{i}.0""" in key]
        UpperCAmelCase__ :Any = [key for key in input_blocks[i] if f"""input_blocks.{i}.1""" in key]
        # Downsampler op: copied verbatim, then the rest of this block is skipped.
        if f"""input_blocks.{i}.0.op.weight""" in checkpoint:
            UpperCAmelCase__ :Union[str, Any] = checkpoint[
                f"""input_blocks.{i}.0.op.weight"""
            ]
            UpperCAmelCase__ :Union[str, Any] = checkpoint[
                f"""input_blocks.{i}.0.op.bias"""
            ]
            continue
        UpperCAmelCase__ :int = renew_resnet_paths(SCREAMING_SNAKE_CASE )
        UpperCAmelCase__ :List[str] = {'old': f"""input_blocks.{i}.0""", 'new': f"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""}
        UpperCAmelCase__ :Dict = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'}
        assign_to_checkpoint(
            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , additional_replacements=[meta_path, resnet_op] , config=SCREAMING_SNAKE_CASE )
        # Attention sub-block present: split fused qkv into query/key/value.
        if len(SCREAMING_SNAKE_CASE ):
            UpperCAmelCase__ :Union[str, Any] = renew_attention_paths(SCREAMING_SNAKE_CASE )
            UpperCAmelCase__ :Any = {
                'old': f"""input_blocks.{i}.1""",
                'new': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}""",
            }
            UpperCAmelCase__ :Optional[Any] = {
                f"""input_blocks.{i}.1.qkv.bias""": {
                    'key': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
                    'query': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
                    'value': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
                },
                f"""input_blocks.{i}.1.qkv.weight""": {
                    'key': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
                    'query': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
                    'value': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
                },
            }
            assign_to_checkpoint(
                SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , attention_paths_to_split=SCREAMING_SNAKE_CASE , config=SCREAMING_SNAKE_CASE , )
    # Mid block: resnet / attention / resnet.
    UpperCAmelCase__ :Optional[int] = middle_blocks[0]
    UpperCAmelCase__ :Optional[int] = middle_blocks[1]
    UpperCAmelCase__ :List[Any] = middle_blocks[2]
    UpperCAmelCase__ :str = renew_resnet_paths(SCREAMING_SNAKE_CASE )
    assign_to_checkpoint(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , config=SCREAMING_SNAKE_CASE )
    UpperCAmelCase__ :List[str] = renew_resnet_paths(SCREAMING_SNAKE_CASE )
    assign_to_checkpoint(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , config=SCREAMING_SNAKE_CASE )
    UpperCAmelCase__ :Tuple = renew_attention_paths(SCREAMING_SNAKE_CASE )
    UpperCAmelCase__ :Union[str, Any] = {
        'middle_block.1.qkv.bias': {
            'key': 'mid_block.attentions.0.key.bias',
            'query': 'mid_block.attentions.0.query.bias',
            'value': 'mid_block.attentions.0.value.bias',
        },
        'middle_block.1.qkv.weight': {
            'key': 'mid_block.attentions.0.key.weight',
            'query': 'mid_block.attentions.0.query.weight',
            'value': 'mid_block.attentions.0.value.weight',
        },
    }
    assign_to_checkpoint(
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , attention_paths_to_split=SCREAMING_SNAKE_CASE , config=SCREAMING_SNAKE_CASE )
    # Up (output) blocks.
    for i in range(SCREAMING_SNAKE_CASE ):
        UpperCAmelCase__ :List[str] = i // (config['num_res_blocks'] + 1)
        UpperCAmelCase__ :Any = i % (config['num_res_blocks'] + 1)
        UpperCAmelCase__ :int = [shave_segments(SCREAMING_SNAKE_CASE , 2 ) for name in output_blocks[i]]
        UpperCAmelCase__ :Tuple = {}
        # Group this block's keys by their leading layer id (intended: layer_id, layer_name).
        for layer in output_block_layers:
            UpperCAmelCase__ :int = layer.split('.' )[0], shave_segments(SCREAMING_SNAKE_CASE , 1 )
            if layer_id in output_block_list:
                output_block_list[layer_id].append(SCREAMING_SNAKE_CASE )
            else:
                UpperCAmelCase__ :Optional[int] = [layer_name]
        if len(SCREAMING_SNAKE_CASE ) > 1:
            UpperCAmelCase__ :List[Any] = [key for key in output_blocks[i] if f"""output_blocks.{i}.0""" in key]
            UpperCAmelCase__ :str = [key for key in output_blocks[i] if f"""output_blocks.{i}.1""" in key]
            UpperCAmelCase__ :int = renew_resnet_paths(SCREAMING_SNAKE_CASE )
            UpperCAmelCase__ :List[Any] = renew_resnet_paths(SCREAMING_SNAKE_CASE )
            UpperCAmelCase__ :List[Any] = {'old': f"""output_blocks.{i}.0""", 'new': f"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""}
            assign_to_checkpoint(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , config=SCREAMING_SNAKE_CASE )
            # Upsampler conv: copied verbatim to the upsamplers slot (targets lost).
            if ["conv.weight", "conv.bias"] in output_block_list.values():
                UpperCAmelCase__ :Dict = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] )
                UpperCAmelCase__ :Union[str, Any] = checkpoint[
                    f"""output_blocks.{i}.{index}.conv.weight"""
                ]
                UpperCAmelCase__ :List[str] = checkpoint[
                    f"""output_blocks.{i}.{index}.conv.bias"""
                ]
                # Clear attentions as they have been attributed above.
                if len(SCREAMING_SNAKE_CASE ) == 2:
                    UpperCAmelCase__ :str = []
            if len(SCREAMING_SNAKE_CASE ):
                UpperCAmelCase__ :Union[str, Any] = renew_attention_paths(SCREAMING_SNAKE_CASE )
                UpperCAmelCase__ :List[str] = {
                    'old': f"""output_blocks.{i}.1""",
                    'new': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}""",
                }
                UpperCAmelCase__ :Dict = {
                    f"""output_blocks.{i}.1.qkv.bias""": {
                        'key': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
                        'query': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
                        'value': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
                    },
                    f"""output_blocks.{i}.1.qkv.weight""": {
                        'key': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
                        'query': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
                        'value': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
                    },
                }
                # Only split qkv when a fused qkv key is actually present.
                assign_to_checkpoint(
                    SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=SCREAMING_SNAKE_CASE , )
        else:
            # Single-layer output block: rename its resnet weights directly.
            UpperCAmelCase__ :str = renew_resnet_paths(SCREAMING_SNAKE_CASE , n_shave_prefix_segments=1 )
            for path in resnet_0_paths:
                UpperCAmelCase__ :List[Any] = '.'.join(['output_blocks', str(SCREAMING_SNAKE_CASE ), path['old']] )
                UpperCAmelCase__ :Any = '.'.join(['up_blocks', str(SCREAMING_SNAKE_CASE ), 'resnets', str(SCREAMING_SNAKE_CASE ), path['new']] )
                UpperCAmelCase__ :List[str] = checkpoint[old_path]
    return new_checkpoint
if __name__ == "__main__":
__snake_case : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
__snake_case : Union[str, Any] = parser.parse_args()
__snake_case : Optional[int] = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
__snake_case : int = json.loads(f.read())
__snake_case : Tuple = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
__snake_case : Any = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
__snake_case : List[Any] = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1]))
__snake_case : Tuple = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1]))
__snake_case : Optional[Any] = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 700 |
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
__snake_case : Optional[Any] = '\\n\n'
__snake_case : List[Any] = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
__snake_case : Tuple = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... 
input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    """Perplexity of input texts under a causal language model.

    For each text, the exponentiated (base-2) mean negative log-likelihood of
    its tokens is computed; the metric returns per-text perplexities and their
    mean.
    """

    def _info(self):
        """Declare the metric's features and reference documentation."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'input_texts': datasets.Value('string'),
                }
            ),
            reference_urls=['https://huggingface.co/docs/transformers/perplexity'],
        )

    def _compute(self, input_texts, model_id, batch_size=16, add_start_token=True, device=None):
        """Compute perplexities for ``input_texts`` with the model ``model_id``.

        Args:
            input_texts: list of input strings.
            model_id: model name/path for ``AutoModelForCausalLM``.
            batch_size: texts per forward pass.
            add_start_token: prepend the model's BOS token so the first word's
                probability is included.
            device: "gpu", "cuda" or "cpu"; defaults to CUDA when available.

        Returns:
            ``{"perplexities": [...], "mean_perplexity": float}``
        """
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = 'cuda'
        else:
            device = 'cuda' if torch.cuda.is_available() else 'cpu'

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors='pt',
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings['input_ids']
        attn_masks = encodings['attention_mask']

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction='none')

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.intaa).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            # Shift so each position predicts the next token; mask out padding.
            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.expa(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 433 | 0 |
"""simple docstring"""
from math import factorial
class Dual:
    """A dual number for forward-mode automatic differentiation.

    ``real`` holds the value; ``duals[k]`` is the coefficient of the nilpotent
    basis element ``E**(k+1)``, so propagating a first-order ``Dual`` through a
    polynomial yields its derivatives (up to factorials) in ``duals``.
    """

    def __init__(self, real, rank):
        """``rank`` is either an int (number of dual components, each
        initialised to 1) or an explicit list of dual coefficients."""
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"""{self.real}+"""
            f"""{'+'.join(str(dual )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}"""
        )

    def reduce(self):
        """Return a copy with trailing zero dual coefficients stripped."""
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        # Pad the shorter coefficient list with 1s so both align.
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        # Polynomial (convolution-style) product of the dual coefficients.
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError('power must be a positive integer')
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x
def differentiate(func, position, order):
    """Return the ``order``-th derivative of ``func`` at ``position`` using
    forward-mode automatic differentiation with ``Dual`` numbers.

    Args:
        func: callable accepting a ``Dual`` (e.g. a polynomial).
        position: numeric point at which to differentiate.
        order: derivative order (0 returns the function value itself).

    Raises:
        ValueError: if ``func`` is not callable, ``position`` is not numeric,
            or ``order`` is not an int.
    """
    if not callable(func):
        raise ValueError('differentiate() requires a function as input for func')
    if not isinstance(position, (float, int)):
        raise ValueError('differentiate() requires a float as input for position')
    if not isinstance(order, int):
        raise ValueError('differentiate() requires an int as input for order')
    # Seed a first-order dual at the evaluation point and propagate it through
    # func; the k-th dual coefficient then holds f^(k)(x) / k!.
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)
if __name__ == "__main__":
import doctest
doctest.testmod()
def lowerCAmelCase_ ( snake_case_ : Optional[int] ) ->List[Any]:
return y**2 * y**4
print(differentiate(f, 9, 2)) | 174 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
def lowerCAmelCase_ ( snake_case_ : str ) ->Union[str, Any]:
    """Build a hybrid-embedding ``DPTConfig`` plus the expected output shape
    for the model variant named in the checkpoint URL.

    NOTE(review): mangled — the parameter is ``snake_case_`` but the body
    tests ``checkpoint_url`` (undefined), and every assignment target was
    rewritten to ``lowerCamelCase__``, losing the intended ``config.<attr>``
    targets. Restore from the upstream DPT conversion script.
    """
    lowerCamelCase__ : Tuple =DPTConfig(embedding_type='hybrid' )
    # "large" variant: wider/deeper backbone (targets lost — presumably
    # hidden_size, intermediate_size, layer/head counts, out indices, neck sizes).
    if "large" in checkpoint_url:
        lowerCamelCase__ : Any =1_0_2_4
        lowerCamelCase__ : Optional[Any] =4_0_9_6
        lowerCamelCase__ : Optional[int] =2_4
        lowerCamelCase__ : List[Any] =1_6
        lowerCamelCase__ : List[str] =[5, 1_1, 1_7, 2_3]
        lowerCamelCase__ : Optional[Any] =[2_5_6, 5_1_2, 1_0_2_4, 1_0_2_4]
        lowerCamelCase__ : Any =(1, 3_8_4, 3_8_4)
    # BUG(review): `"nyu" or "midas" in checkpoint_url` is always truthy — the
    # non-empty literal "nyu" short-circuits the `or`. The intent is
    # `"nyu" in checkpoint_url or "midas" in checkpoint_url`.
    if "nyu" or "midas" in checkpoint_url:
        lowerCamelCase__ : int =7_6_8
        lowerCamelCase__ : Optional[Any] =[1, 1, 1, 0.5]
        lowerCamelCase__ : Dict =[2_5_6, 5_1_2, 7_6_8, 7_6_8]
        lowerCamelCase__ : Tuple =1_5_0
        lowerCamelCase__ : Optional[Any] =1_6
        lowerCamelCase__ : int =(1, 3_8_4, 3_8_4)
        lowerCamelCase__ : Optional[Any] =False
        lowerCamelCase__ : Any ='project'
    # ADE20K semantic-segmentation variant: attach the 150-class label maps.
    if "ade" in checkpoint_url:
        lowerCamelCase__ : Optional[int] =True
        lowerCamelCase__ : Dict =7_6_8
        lowerCamelCase__ : List[Any] =[1, 1, 1, 0.5]
        lowerCamelCase__ : Any =1_5_0
        lowerCamelCase__ : List[str] =1_6
        lowerCamelCase__ : Any ='huggingface/label-files'
        lowerCamelCase__ : List[Any] ='ade20k-id2label.json'
        # Download the id->label mapping from the Hub dataset repo.
        lowerCamelCase__ : List[Any] =json.load(open(cached_download(hf_hub_url(snake_case_ , snake_case_ , repo_type='dataset' ) ) , 'r' ) )
        lowerCamelCase__ : int ={int(snake_case_ ): v for k, v in idalabel.items()}
        lowerCamelCase__ : Dict =idalabel
        lowerCamelCase__ : Any ={v: k for k, v in idalabel.items()}
        lowerCamelCase__ : int =[1, 1_5_0, 4_8_0, 4_8_0]
    return config, expected_shape
def remove_ignore_keys_(state_dict):
    """Drop the original classification-head weights, which have no
    counterpart in the converted DPT model. Mutates ``state_dict`` in place.
    """
    ignore_keys = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
    for k in ignore_keys:
        # Pop with a default so already-missing keys are silently ignored.
        state_dict.pop(k, None)
def rename_key(name):
    """Translate one original DPT/MiDaS weight name into the HuggingFace
    DPT naming scheme by applying an ordered chain of substring rewrites.

    Args:
        name: original dotted weight name.

    Returns:
        The renamed dotted weight name.
    """
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace('pretrained.model', 'dpt.encoder')
    if "pretrained.model" in name:
        name = name.replace('pretrained.model', 'dpt.embeddings')
    if "patch_embed" in name:
        name = name.replace('patch_embed', '')
    if "pos_embed" in name:
        name = name.replace('pos_embed', 'position_embeddings')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "proj" in name and "project" not in name:
        name = name.replace('proj', 'projection')
    if "blocks" in name:
        name = name.replace('blocks', 'layer')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if "norm1" in name and "backbone" not in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name and "backbone" not in name:
        name = name.replace('norm2', 'layernorm_after')
    if "scratch.output_conv" in name:
        name = name.replace('scratch.output_conv', 'head')
    if "scratch" in name:
        name = name.replace('scratch', 'neck')
    if "layer1_rn" in name:
        name = name.replace('layer1_rn', 'convs.0')
    if "layer2_rn" in name:
        name = name.replace('layer2_rn', 'convs.1')
    if "layer3_rn" in name:
        name = name.replace('layer3_rn', 'convs.2')
    if "layer4_rn" in name:
        name = name.replace('layer4_rn', 'convs.3')
    if "refinenet" in name:
        layer_idx = int(name[len('neck.refinenet') : len('neck.refinenet') + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"""refinenet{layer_idx}""", f"""fusion_stage.layers.{abs(layer_idx-4 )}""")
    if "out_conv" in name:
        name = name.replace('out_conv', 'projection')
    if "resConfUnit1" in name:
        name = name.replace('resConfUnit1', 'residual_layer1')
    if "resConfUnit2" in name:
        name = name.replace('resConfUnit2', 'residual_layer2')
    if "conv1" in name:
        name = name.replace('conv1', 'convolution1')
    if "conv2" in name:
        name = name.replace('conv2', 'convolution2')
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess1.0.project.0', 'neck.reassemble_stage.readout_projects.0.0')
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess2.0.project.0', 'neck.reassemble_stage.readout_projects.1.0')
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess3.0.project.0', 'neck.reassemble_stage.readout_projects.2.0')
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess4.0.project.0', 'neck.reassemble_stage.readout_projects.3.0')
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace('pretrained.act_postprocess1.3', 'neck.reassemble_stage.layers.0.projection')
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace('pretrained.act_postprocess1.4', 'neck.reassemble_stage.layers.0.resize')
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace('pretrained.act_postprocess2.3', 'neck.reassemble_stage.layers.1.projection')
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace('pretrained.act_postprocess2.4', 'neck.reassemble_stage.layers.1.resize')
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace('pretrained.act_postprocess3.3', 'neck.reassemble_stage.layers.2.projection')
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace('pretrained.act_postprocess4.3', 'neck.reassemble_stage.layers.3.projection')
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace('pretrained.act_postprocess4.4', 'neck.reassemble_stage.layers.3.resize')
    if "pretrained" in name:
        name = name.replace('pretrained', 'dpt')
    if "bn" in name:
        name = name.replace('bn', 'batch_norm')
    if "head" in name:
        name = name.replace('head', 'head.head')
    if "encoder.norm" in name:
        name = name.replace('encoder.norm', 'layernorm')
    if "auxlayer" in name:
        name = name.replace('auxlayer', 'auxiliary_head.head')
    if "backbone" in name:
        name = name.replace('backbone', 'backbone.bit.encoder')
    if ".." in name:
        name = name.replace('..', '.')
    if "stem.conv" in name:
        name = name.replace('stem.conv', 'bit.embedder.convolution')
    if "blocks" in name:
        name = name.replace('blocks', 'layers')
    if "convolution" in name and "backbone" in name:
        name = name.replace('convolution', 'conv')
    if "layer" in name and "backbone" in name:
        name = name.replace('layer', 'layers')
    if "backbone.bit.encoder.bit" in name:
        name = name.replace('backbone.bit.encoder.bit', 'backbone.bit')
    if "embedder.conv" in name:
        name = name.replace('embedder.conv', 'embedder.convolution')
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace('backbone.bit.encoder.stem.norm', 'backbone.bit.embedder.norm')
    return name
def read_in_q_k_v(state_dict, config):
    """Split each layer's fused qkv projection into separate query/key/value
    entries, as expected by the HuggingFace DPT model.

    Args:
        state_dict: converted state dict (mutated in place); the fused
            ``dpt.encoder.layer.{i}.attn.qkv.*`` entries are popped.
        config: config exposing ``num_hidden_layers`` and ``hidden_size``.
    """
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.weight""")
        in_proj_bias = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    """Download the standard COCO cats test image used to sanity-check the
    converted model."""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    # stream=True lets PIL read directly from the response's raw stream.
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def lowerCAmelCase_ ( snake_case_ : Union[str, Any] , snake_case_ : int , snake_case_ : Any , snake_case_ : Optional[Any] , snake_case_ : int ) ->int:
    """Convert an original DPT checkpoint into the HuggingFace DPT layout and
    optionally display a prediction, save, and push the converted model.

    NOTE(review): mangled — all five parameters share the name ``snake_case_``
    (a SyntaxError in Python) and local assignment targets were rewritten to
    ``lowerCamelCase__``; the body still references the intended names
    (``state_dict``, ``val``, ``checkpoint_url``, ``model``, ``image``,
    ``outputs``, ``prediction`` ...), which are therefore undefined here.
    Intended parameters appear to include a checkpoint path/URL,
    ``pytorch_dump_folder_path``, ``push_to_hub`` and ``show_prediction`` —
    TODO confirm against the upstream conversion script and restore.
    """
    lowerCamelCase__ , lowerCamelCase__ : List[Any] =get_dpt_config(snake_case_ )
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    lowerCamelCase__ : Union[str, Any] =torch.load(snake_case_ , map_location='cpu' )
    # remove certain keys
    remove_ignore_keys_(snake_case_ )
    # rename keys
    for key in state_dict.copy().keys():
        lowerCamelCase__ : str =state_dict.pop(snake_case_ )
        lowerCamelCase__ : Tuple =val
    # read in qkv matrices
    read_in_q_k_v(snake_case_ , snake_case_ )
    # load HuggingFace model
    lowerCamelCase__ : str =DPTForSemanticSegmentation(snake_case_ ) if 'ade' in checkpoint_url else DPTForDepthEstimation(snake_case_ )
    model.load_state_dict(snake_case_ )
    model.eval()
    # Check outputs on an image
    lowerCamelCase__ : Optional[int] =4_8_0 if 'ade' in checkpoint_url else 3_8_4
    lowerCamelCase__ : Optional[Any] =DPTImageProcessor(size=snake_case_ )
    lowerCamelCase__ : Optional[int] =prepare_img()
    lowerCamelCase__ : Optional[int] =image_processor(snake_case_ , return_tensors='pt' )
    # forward pass
    lowerCamelCase__ : int =model(**snake_case_ ).logits if 'ade' in checkpoint_url else model(**snake_case_ ).predicted_depth
    # Optionally upsample the prediction to the input image size and display it.
    if show_prediction:
        lowerCamelCase__ : Optional[Any] =(
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode='bicubic' , align_corners=snake_case_ , )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 2_5_5 ).show()
    if pytorch_dump_folder_path is not None:
        Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
        print(f"""Saving model to {pytorch_dump_folder_path}""" )
        model.save_pretrained(snake_case_ )
        print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
        image_processor.save_pretrained(snake_case_ )
    if push_to_hub:
        model.push_to_hub('ybelkada/dpt-hybrid-midas' )
        image_processor.push_to_hub('ybelkada/dpt-hybrid-midas' )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
parser.add_argument(
"""--show_prediction""",
action="""store_true""",
)
lowerCAmelCase = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
) | 174 | 1 |
"""simple docstring"""
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise ValueError("multiplicative_persistence() only accepts integral values" )
if num < 0:
raise ValueError("multiplicative_persistence() does not accept negative values" )
__lowerCAmelCase = 0
__lowerCAmelCase = str(_UpperCamelCase )
while len(_UpperCamelCase ) != 1:
__lowerCAmelCase = [int(_UpperCamelCase ) for i in num_string]
__lowerCAmelCase = 1
for i in range(0 , len(_UpperCamelCase ) ):
total *= numbers[i]
__lowerCAmelCase = str(_UpperCamelCase )
steps += 1
return steps
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise ValueError("additive_persistence() only accepts integral values" )
if num < 0:
raise ValueError("additive_persistence() does not accept negative values" )
__lowerCAmelCase = 0
__lowerCAmelCase = str(_UpperCamelCase )
while len(_UpperCamelCase ) != 1:
__lowerCAmelCase = [int(_UpperCamelCase ) for i in num_string]
__lowerCAmelCase = 0
for i in range(0 , len(_UpperCamelCase ) ):
total += numbers[i]
__lowerCAmelCase = str(_UpperCamelCase )
steps += 1
return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
| 712 |
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
A : Dict = 1_6
A : Optional[int] = 3_2
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase = 16 ):
'''simple docstring'''
__lowerCAmelCase = AutoTokenizer.from_pretrained("bert-base-cased" )
__lowerCAmelCase = load_dataset("glue" , "mrpc" )
def tokenize_function(_UpperCamelCase ):
# max_length=None => use the model max length (it's actually the default)
__lowerCAmelCase = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_UpperCamelCase , max_length=_UpperCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__lowerCAmelCase = datasets.map(
_UpperCamelCase , batched=_UpperCamelCase , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__lowerCAmelCase = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(_UpperCamelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__lowerCAmelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__lowerCAmelCase = 16
elif accelerator.mixed_precision != "no":
__lowerCAmelCase = 8
else:
__lowerCAmelCase = None
return tokenizer.pad(
_UpperCamelCase , padding="longest" , max_length=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , return_tensors="pt" , )
# Instantiate dataloaders.
__lowerCAmelCase = DataLoader(
tokenized_datasets["train"] , shuffle=_UpperCamelCase , collate_fn=_UpperCamelCase , batch_size=_UpperCamelCase )
__lowerCAmelCase = DataLoader(
tokenized_datasets["validation"] , shuffle=_UpperCamelCase , collate_fn=_UpperCamelCase , batch_size=_UpperCamelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
A : List[Any] = mocked_dataloaders # noqa: F811
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
if os.environ.get("TESTING_MOCKED_DATALOADERS" , _UpperCamelCase ) == "1":
__lowerCAmelCase = 2
# Initialize accelerator
__lowerCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__lowerCAmelCase = config["lr"]
__lowerCAmelCase = int(config["num_epochs"] )
__lowerCAmelCase = int(config["seed"] )
__lowerCAmelCase = int(config["batch_size"] )
__lowerCAmelCase = evaluate.load("glue" , "mrpc" )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=_UpperCamelCase )
def inner_training_loop(_UpperCamelCase ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(_UpperCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=_UpperCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__lowerCAmelCase = model.to(accelerator.device )
# Instantiate optimizer
__lowerCAmelCase = AdamW(params=model.parameters() , lr=_UpperCamelCase )
__lowerCAmelCase , __lowerCAmelCase = get_dataloaders(_UpperCamelCase , _UpperCamelCase )
# Instantiate scheduler
__lowerCAmelCase = get_linear_schedule_with_warmup(
optimizer=_UpperCamelCase , num_warmup_steps=100 , num_training_steps=(len(_UpperCamelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Now we train the model
for epoch in range(_UpperCamelCase ):
model.train()
for step, batch in enumerate(_UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
__lowerCAmelCase = model(**_UpperCamelCase )
__lowerCAmelCase = outputs.loss
accelerator.backward(_UpperCamelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__lowerCAmelCase = model(**_UpperCamelCase )
__lowerCAmelCase = outputs.logits.argmax(dim=-1 )
__lowerCAmelCase , __lowerCAmelCase = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=_UpperCamelCase , references=_UpperCamelCase , )
__lowerCAmelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:" , _UpperCamelCase )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=_UpperCamelCase , default=_UpperCamelCase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
__lowerCAmelCase = parser.parse_args()
__lowerCAmelCase = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(_UpperCamelCase , _UpperCamelCase )
if __name__ == "__main__":
main()
| 282 | 0 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def A__ ( _a : Dict , _a : str , _a : Optional[int] , _a : Optional[Any] ):
'''simple docstring'''
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
def A__ ( _a : Dict , _a : Dict , _a : Optional[int] , _a : Union[str, Any] , _a : Tuple=True ):
'''simple docstring'''
model.train()
snake_case__ : Union[str, Any] =model(snake_case__ )
snake_case__ : str =F.mse_loss(snake_case__ , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(snake_case__ )
def A__ ( _a : Union[str, Any] , _a : Any=False ):
'''simple docstring'''
set_seed(42 )
snake_case__ : Optional[int] =RegressionModel()
snake_case__ : Dict =deepcopy(snake_case__ )
snake_case__ : Tuple =RegressionDataset(length=80 )
snake_case__ : Tuple =DataLoader(snake_case__ , batch_size=16 )
model.to(accelerator.device )
if sched:
snake_case__ : Optional[int] =AdamW(params=model.parameters() , lr=1E-3 )
snake_case__ : Union[str, Any] =AdamW(params=ddp_model.parameters() , lr=1E-3 )
snake_case__ : Optional[int] =LambdaLR(snake_case__ , lr_lambda=lambda _a : epoch**0.6_5 )
snake_case__ : Optional[int] =LambdaLR(snake_case__ , lr_lambda=lambda _a : epoch**0.6_5 )
# Make a copy of `model`
if sched:
snake_case__ , snake_case__ , snake_case__ , snake_case__ : Tuple =accelerator.prepare(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
else:
snake_case__ , snake_case__ : Any =accelerator.prepare(snake_case__ , snake_case__ )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def A__ ( _a : Union[str, Any] ):
'''simple docstring'''
snake_case__ , snake_case__ , snake_case__ : str =get_training_setup(snake_case__ )
# Use a single batch
snake_case__ , snake_case__ : Any =next(iter(snake_case__ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
snake_case__ , snake_case__ : Optional[Any] =accelerator.gather((ddp_input, ddp_target) )
snake_case__ , snake_case__ : List[str] =input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(snake_case__ ):
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
else:
# Sync grads
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
snake_case__ : List[str] =ddp_input[torch.randperm(len(snake_case__ ) )]
def A__ ( _a : List[Any] ):
'''simple docstring'''
snake_case__ , snake_case__ , snake_case__ : List[Any] =get_training_setup(snake_case__ )
# Use a single batch
snake_case__ , snake_case__ : Tuple =next(iter(snake_case__ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
snake_case__ , snake_case__ : Optional[int] =accelerator.gather((ddp_input, ddp_target) )
snake_case__ , snake_case__ : str =input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(snake_case__ ):
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
else:
# Sync grads
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
snake_case__ : List[Any] =ddp_input[torch.randperm(len(snake_case__ ) )]
def A__ ( _a : str=False , _a : Optional[Any]=False ):
'''simple docstring'''
snake_case__ : Union[str, Any] =Accelerator(
split_batches=snake_case__ , dispatch_batches=snake_case__ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
snake_case__ , snake_case__ , snake_case__ : Any =get_training_setup(snake_case__ )
for iteration, batch in enumerate(snake_case__ ):
snake_case__ , snake_case__ : str =batch.values()
# Gather the distributed inputs and targs for the base model
snake_case__ , snake_case__ : Dict =accelerator.gather((ddp_input, ddp_target) )
snake_case__ , snake_case__ : Union[str, Any] =input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(snake_case__ ):
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(snake_case__ ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
snake_case__ : Union[str, Any] =ddp_input[torch.randperm(len(snake_case__ ) )]
GradientState._reset_state()
def A__ ( _a : Dict=False , _a : Optional[int]=False ):
'''simple docstring'''
snake_case__ : Optional[int] =Accelerator(
split_batches=snake_case__ , dispatch_batches=snake_case__ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ : List[Any] =get_training_setup(snake_case__ , snake_case__ )
for iteration, batch in enumerate(snake_case__ ):
snake_case__ , snake_case__ : Any =batch.values()
# Gather the distributed inputs and targs for the base model
snake_case__ , snake_case__ : List[Any] =accelerator.gather((ddp_input, ddp_target) )
snake_case__ , snake_case__ : Optional[int] =input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(snake_case__ )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(snake_case__ ):
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
snake_case__ : Union[str, Any] =(((iteration + 1) % 2) == 0) or ((iteration + 1) == len(snake_case__ ))
if accelerator.num_processes > 1:
check_model_parameters(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
GradientState._reset_state()
def A__ ( ):
'''simple docstring'''
snake_case__ : Optional[int] =Accelerator()
snake_case__ : Optional[int] =RegressionDataset(length=80 )
snake_case__ : List[str] =DataLoader(snake_case__ , batch_size=16 )
snake_case__ : Optional[int] =RegressionDataset(length=96 )
snake_case__ : Union[str, Any] =DataLoader(snake_case__ , batch_size=16 )
snake_case__ , snake_case__ : Optional[Any] =accelerator.prepare(snake_case__ , snake_case__ )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(snake_case__ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case__ )
if iteration < len(snake_case__ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(snake_case__ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case__ )
if batch_num < len(snake_case__ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def A__ ( ):
'''simple docstring'''
snake_case__ : Dict =Accelerator()
snake_case__ : int =accelerator.state
if state.local_process_index == 0:
print("""**Test `accumulate` gradient accumulation with dataloader break**""" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("""**Test NOOP `no_sync` context manager**""" )
test_noop_sync(snake_case__ )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("""**Test Distributed `no_sync` context manager**""" )
test_distributed_sync(snake_case__ )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation, """ , f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**" , )
test_gradient_accumulation(snake_case__ , snake_case__ )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("""<""" , """2.0""" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , """`split_batches=False`, `dispatch_batches=False`**""" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**" , )
test_gradient_accumulation_with_opt_and_scheduler(snake_case__ , snake_case__ )
def A__ ( _a : Any ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 385 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase__ : Optional[Any] = {
"configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
"tokenization_lxmert": ["LxmertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[int] = ["LxmertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Any = [
"LxmertEncoder",
"LxmertForPreTraining",
"LxmertForQuestionAnswering",
"LxmertModel",
"LxmertPreTrainedModel",
"LxmertVisualFeatureEncoder",
"LxmertXLayer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : List[str] = [
"TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLxmertForPreTraining",
"TFLxmertMainLayer",
"TFLxmertModel",
"TFLxmertPreTrainedModel",
"TFLxmertVisualFeatureEncoder",
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
UpperCAmelCase__ : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 313 | 0 |
"""simple docstring"""
import math
def __a ( A , A = 0 , A = 0 ) -> list:
'''simple docstring'''
A__ = end or len(A )
for i in range(A , A ):
A__ = i
A__ = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
A__ = array[temp_index - 1]
temp_index -= 1
A__ = temp_index_value
return array
def __a ( A , A , A ) -> None: # Max Heap
'''simple docstring'''
A__ = index
A__ = 2 * index + 1 # Left Node
A__ = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
A__ = left_index
if right_index < heap_size and array[largest] < array[right_index]:
A__ = right_index
if largest != index:
A__ , A__ = array[largest], array[index]
heapify(A , A , A )
def __a ( A ) -> list:
'''simple docstring'''
A__ = len(A )
for i in range(n // 2 , -1 , -1 ):
heapify(A , A , A )
for i in range(n - 1 , 0 , -1 ):
A__ , A__ = array[0], array[i]
heapify(A , 0 , A )
return array
def __a ( A , A , A , A ) -> int:
'''simple docstring'''
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def __a ( A , A , A , A ) -> int:
'''simple docstring'''
A__ = low
A__ = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
A__ , A__ = array[j], array[i]
i += 1
def __a ( A ) -> list:
'''simple docstring'''
if len(A ) == 0:
return array
A__ = 2 * math.ceil(math.loga(len(A ) ) )
A__ = 16
return intro_sort(A , 0 , len(A ) , A , A )
def __a ( A , A , A , A , A ) -> list:
'''simple docstring'''
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(A )
max_depth -= 1
A__ = median_of_a(A , A , start + ((end - start) // 2) + 1 , end - 1 )
A__ = partition(A , A , A , A )
intro_sort(A , A , A , A , A )
A__ = p
return insertion_sort(A , A , A )
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase =input("""Enter numbers separated by a comma : """).strip()
__UpperCAmelCase =[float(item) for item in user_input.split(""",""")]
print(sort(unsorted)) | 717 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def lowercase_ ( self ):
'''simple docstring'''
A__ = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base" )
A__ = {
"input_ids": tf.convert_to_tensor([[0, 26_46, 1_02_69, 83, 9_99_42, 2]] , dtype=tf.intaa ), # "My dog is cute"
"attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ),
}
A__ = model(UpperCamelCase__ )["last_hidden_state"]
A__ = tf.TensorShape((1, 6, 7_68) )
self.assertEqual(output.shape , UpperCamelCase__ )
# compare the actual values for a slice.
A__ = tf.convert_to_tensor(
[
[
[0.068_1762, 0.1089_4451, 0.0677_2504],
[-0.0642_3668, 0.0236_6615, 0.0432_9344],
[-0.0605_7295, 0.0997_4135, -0.0007_0584],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) ) | 261 | 0 |
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Fix: all six module-level values were previously bound to the single name
# ``SCREAMING_SNAKE_CASE``, each assignment clobbering the previous one, while
# later code in this file reads ``logger`` and the three ALL_CAPS constants
# referenced by the tokenizer class.
logger = logging.get_logger(__name__)

# SentencePiece's word-boundary marker (prefix of word-initial pieces).
# NOTE(review): not referenced in this chunk; name follows the usual
# transformers convention — confirm against the rest of the file.
SPIECE_UNDERLINE = "▁"

# File name the tokenizer expects for its SentencePiece model.
VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}

# Pretrained checkpoint name -> hosted vocab-file URL.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}

# Per-checkpoint tokenizer-init overrides.
# NOTE(review): not referenced in this chunk — confirm the name is the one
# the rest of the file expects.
PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}

# Maximum model input length (positional-embedding size) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}
def a (lowerCAmelCase__ ):
    """Read a vocabulary file (one token per line) into an
    ``collections.OrderedDict`` mapping each token to its 0-based line index.

    Only the trailing newline is stripped from each line, so any other
    whitespace inside a token is preserved.
    """
    token_to_index = collections.OrderedDict()
    with open(lowerCAmelCase__ , """r""" , encoding="""utf-8""" ) as vocab_handle:
        # Iterate the handle directly; each yielded line keeps its "\n".
        for line_no, line in enumerate(vocab_handle ):
            token_to_index[line.rstrip("""\n""" )] = line_no
    return token_to_index
class __UpperCAmelCase ( PreTrainedTokenizer ):
    """
    SentencePiece-based tokenizer for XProphetNet whose ids are kept aligned
    with the original fairseq vocabulary (see the alignment table in
    ``__init__``).
    """

    # Fix: the class previously inherited from the undefined name ``__A``
    # (``PreTrainedTokenizer`` is what this file imports) and bound all four
    # class attributes to the same name ``_lowerCamelCase``, so only the last
    # assignment survived.  The names below are the ones the base tokenizer
    # machinery looks up.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , __A , __A="[SEP]" , __A="[SEP]" , __A="[SEP]" , __A="[UNK]" , __A="[PAD]" , __A="[CLS]" , __A="[MASK]" , __A = None , **__A , ):
__a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__A , eos_token=__A , sep_token=__A , unk_token=__A , pad_token=__A , cls_token=__A , mask_token=__A , sp_model_kwargs=self.sp_model_kwargs , **__A , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"""You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"""
""" pip install sentencepiece""" )
raise
__a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__A ) )
__a = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
__a = {"""[PAD]""": 0, """[CLS]""": 1, """[SEP]""": 2, """[UNK]""": 3, """[MASK]""": 4}
for i in range(10 ):
__a = f'''[unused{i}]'''
__a = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
__a = 12
__a = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(__A )
def __getstate__( self ):
__a = self.__dict__.copy()
__a = None
return state
def __setstate__( self , __A ):
__a = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"""You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"""
""" pip install sentencepiece""" )
raise
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__a = {}
__a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case_ ( self , __A , __A = None , __A = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A )
if token_ids_a is None:
return ([0] * len(__A )) + [1]
return ([0] * len(__A )) + [1] + ([0] * len(__A )) + [1]
def snake_case_ ( self , __A , __A = None ):
__a = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def snake_case_ ( self ):
return len(self.sp_model ) + self.fairseq_offset
def snake_case_ ( self ):
__a = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case_ ( self , __A ):
return self.sp_model.encode(__A , out_type=__A )
def snake_case_ ( self , __A ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__a = self.sp_model.PieceToId(__A )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def snake_case_ ( self , __A ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def snake_case_ ( self , __A ):
__a = """""".join(__A ).replace(__A , """ """ ).strip()
return out_string
def snake_case_ ( self , __A , __A = None ):
if not os.path.isdir(__A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__a = os.path.join(
__A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __A )
elif not os.path.isfile(self.vocab_file ):
with open(__A , """wb""" ) as fi:
__a = self.sp_model.serialized_model_proto()
fi.write(__A )
return (out_vocab_file,)
def snake_case_ ( self , __A , __A = None ):
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
__a = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
| 99 |
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    """Quick-sort ``a[start:end + 1]`` in place with a random pivot.

    Returns:
        The total number of comparisons performed by the partition steps.
    """
    count = 0
    if start < end:
        # Move a randomly chosen pivot value to the end of the slice before partitioning.
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count
def A ( UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str] , UpperCamelCase_ : Any ) -> Dict:
'''simple docstring'''
lowerCAmelCase__ = 0
lowerCAmelCase__ = randint(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = a[end]
lowerCAmelCase__ = a[pivot]
lowerCAmelCase__ = temp
lowerCAmelCase__ = start - 1
for index in range(UpperCamelCase_ , UpperCamelCase_ ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
lowerCAmelCase__ = new_pivot_index + 1
lowerCAmelCase__ = a[new_pivot_index]
lowerCAmelCase__ = a[index]
lowerCAmelCase__ = temp
lowerCAmelCase__ = a[new_pivot_index + 1]
lowerCAmelCase__ = a[end]
lowerCAmelCase__ = temp
return new_pivot_index + 1, count
# Demo: sort 100 samples drawn from a standard normal distribution and report
# the number of comparisons. Variable names restored so later references
# (X, M, r, z) resolve.
outfile = TemporaryFile()
p = 1_00  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
    "No of Comparisons for 100 elements selected from a standard normal distribution"
    "is :"
)
print(z)
| 48 | 0 |
"""simple docstring"""
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
# Docstring/checkpoint constants referenced by the decorators on the model
# classes below; each needs its own name.
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input, drop_prob: float = 0.0, training: bool = False):
    """Drop paths (stochastic depth) per sample, applied in the main path of residual blocks.

    Args:
        input: Tensor of any rank; the first dimension is the batch dimension.
        drop_prob: Probability of zeroing a whole sample's path.
        training: Only drop during training; at eval the input passes through unchanged.

    Returns:
        Tensor of the same shape as ``input``; surviving samples are scaled by
        ``1 / (1 - drop_prob)`` so the expected value is preserved.
    """
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample, broadcast over the remaining dims
    # (works with diff dim tensors, not just 2D ConvNets).
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
class PoolFormerDropPath(nn.Module):
    """Drop paths (stochastic depth) per sample; thin module wrapper over ``drop_path``."""

    def __init__(self, drop_prob=None):
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states):
        # self.training toggles the drop on/off with model.train()/eval().
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self):
        return "p={}".format(self.drop_prob)
class PoolFormerEmbeddings(nn.Module):
    """Construct patch embeddings with an optional normalization layer.

    A strided Conv2d projects (batch, num_channels, H, W) pixel inputs to
    (batch, hidden_size, H', W') patch embeddings.
    """

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        # Accept either a single int or an (h, w) iterable for each geometry arg.
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings
class PoolFormerGroupNorm(nn.GroupNorm):
    """Group normalization with a single group (i.e. LayerNorm over channels).

    Input: tensor of shape (batch, channels, height, width).
    """

    def __init__(self, num_channels, **kwargs):
        # num_groups is fixed to 1.
        super().__init__(1, num_channels, **kwargs)
class PoolFormerPooling(nn.Module):
    """PoolFormer's token mixer: average pooling with the input subtracted.

    The subtraction makes the module output the *residual* of pooling, so the
    surrounding block's skip connection recovers plain pooling.
    """

    def __init__(self, pool_size):
        super().__init__()
        # stride=1 + symmetric padding keeps the spatial size unchanged.
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        return self.pool(hidden_states) - hidden_states
class PoolFormerOutput(nn.Module):
    """PoolFormer MLP block implemented with 1x1 convolutions.

    hidden_size -> intermediate_size -> hidden_size, with activation and
    stochastic-depth drop after each projection.
    """

    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        # config.hidden_act may be an activation name (looked up in ACTaFN)
        # or an already-constructed callable.
        if isinstance(config.hidden_act, str):
            self.act_fn = ACTaFN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states
class PoolFormerLayer(nn.Module):
    """One PoolFormer block: pooling mixer + MLP, each with a residual connection.

    When ``config.use_layer_scale`` is set, each residual branch is scaled by a
    learnable per-channel parameter before being added back.
    """

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module):
    """Stack of patch-embedding stages, each followed by its PoolFormer layers."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule: drop-path rate increases linearly with depth.
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings — stage i consumes the previous stage's channels.
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))
        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None
        hidden_states = pixel_values

        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights (truncated-normal for projections, unit LayerNorm)."""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        # Only the encoder participates in gradient checkpointing.
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value
# Docstring templates consumed by the add_start_docstrings decorators below.
POOLFORMER_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

POOLFORMER_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`PoolFormerImageProcessor.__call__`] for details.
"""
@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values=None, output_hidden_states=None, return_dict=None):
        """Run the encoder; returns last hidden state (and all hidden states on request)."""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )
class PoolFormerFinalPooler(nn.Module):
    """A dense projection (hidden_size -> hidden_size) applied to the final hidden states."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output
@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """,
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])
        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None):
        """Classify images; when ``labels`` is given, also compute the appropriate loss."""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.poolformer(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]

        # Global-average-pool the normalized feature map, then project to logits.
        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            # Infer the problem type once from label dtype / num_labels, then cache it.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 16 |
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    """Pipeline for audio diffusion: generates mel-spectrogram images with a
    UNet (optionally through a VQ-VAE latent space) and converts them to audio.

    NOTE(review): the original block declared every parameter of ``__call__``
    as ``_A`` (a SyntaxError); parameter names below are restored from the
    positions and defaults visible in the mangled signature.
    """

    _optional_components = ["vqvae"]

    def __init__(self, vqvae, unet, mel, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self):
        """Default number of inference steps: 50 for DDIM, 1000 otherwise (DDPM)."""
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000

    @torch.no_grad()
    def __call__(
        self,
        batch_size=1,
        audio_file=None,
        raw_audio=None,
        slice=0,
        start_step=0,
        steps=None,
        generator=None,
        mask_start_secs=0,
        mask_end_secs=0,
        step_generator=None,
        eta=0,
        noise=None,
        encoding=None,
        return_dict=True,
    ):
        """Denoise from noise (or from a partially-noised input slice) to images + audio."""
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            # Map uint8 pixels into [-1, 1] for the diffusion model.
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(
                    input_images, noise, self.scheduler.timesteps[start_step - 1]
                )

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            # Conditioned UNets take the encoding as a third argument.
            if isinstance(self.unet, UNetaDConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    eta=eta,
                    generator=step_generator,
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    generator=step_generator,
                )["prev_sample"]

            if mask is not None:
                # Re-impose the (noised) input on the masked border columns each step.
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))

    @torch.no_grad()
    def encode(self, images, steps=50):
        """Reverse the DDIM denoising process: recover a noisy sample from images."""
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample

    @staticmethod
    def slerp(x0, x1, alpha):
        """Spherical linear interpolation between two flattened tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
| 16 | 1 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class _lowerCAmelCase( unittest.TestCase ):
"""simple docstring"""
a : int =JukeboxTokenizer
a : Optional[Any] ={
'''artist''': '''Zac Brown Band''',
'''genres''': '''Country''',
'''lyrics''': '''I met a traveller from an antique land,
Who said "Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
''',
}
@require_torch
def _a ( self ):
import torch
UpperCamelCase_: List[str] = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics' )
UpperCamelCase_: Optional[Any] = tokenizer(**self.metas )['input_ids']
# fmt: off
UpperCamelCase_: Union[str, Any] = [
torch.tensor([[
0, 0, 0, 7_1_6_9, 5_0_7, 9, 7_6, 3_9, 3_1, 4_6, 7_6, 2_7,
7_6, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8, 3_1, 4_4, 7_6, 3_2,
4_4, 4_1, 3_9, 7_6, 2_7, 4_0, 7_6, 2_7, 4_0, 4_6, 3_5, 4_3,
4_7, 3_1, 7_6, 3_8, 2_7, 4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 4_1, 7_6, 4_5, 2_7, 3_5,
3_0, 7_6, 7_1, 2_0, 4_9, 4_1, 7_6, 4_8, 2_7, 4_5, 4_6, 7_6,
2_7, 4_0, 3_0, 7_6, 4_6, 4_4, 4_7, 4_0, 3_7, 3_8, 3_1, 4_5,
4_5, 7_6, 3_8, 3_1, 3_3, 4_5, 7_6, 4_1, 3_2, 7_6, 4_5, 4_6,
4_1, 4_0, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
1_9, 4_6, 2_7, 4_0, 3_0, 7_6, 3_5, 4_0, 7_6, 4_6, 3_4, 3_1,
7_6, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3, 7_6, 6_3, 7_6, 6_3,
7_6, 6_3, 7_6, 1_4, 3_1, 2_7, 4_4, 7_6, 4_6, 3_4, 3_1, 3_9,
6_4, 7_6, 4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_5, 2_7, 4_0,
3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 8,
2_7, 3_8, 3_2, 7_6, 4_5, 4_7, 4_0, 3_7, 7_6, 2_7, 7_6, 4_5,
3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0, 7_6, 4_8, 3_5, 4_5,
2_7, 3_3, 3_1, 7_6, 3_8, 3_5, 3_1, 4_5, 6_4, 7_6, 4_9, 3_4,
4_1, 4_5, 3_1, 7_6, 3_2, 4_4, 4_1, 4_9, 4_0, 6_4, 7_8, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6, 4_9,
4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_6, 3_8, 3_5, 4_2, 6_4,
7_6, 2_7, 4_0, 3_0, 7_6, 4_5, 4_0, 3_1, 3_1, 4_4, 7_6, 4_1,
3_2, 7_6, 2_9, 4_1, 3_8, 3_0, 7_6, 2_9, 4_1, 3_9, 3_9, 2_7,
4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
2_0, 3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_5, 4_6,
4_5, 7_6, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6, 4_1, 4_4, 7_6, 4_9,
3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 4_1, 4_5, 3_1, 7_6, 4_2, 2_7,
4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_6, 4_4, 3_1, 2_7, 3_0, 7_8,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 3_5, 2_9,
3_4, 7_6, 5_1, 3_1, 4_6, 7_6, 4_5, 4_7, 4_4, 4_8, 3_5, 4_8,
3_1, 6_4, 7_6, 4_5, 4_6, 2_7, 3_9, 4_2, 3_1, 3_0, 7_6, 4_1,
4_0, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 3_8, 3_5, 3_2, 3_1,
3_8, 3_1, 4_5, 4_5, 7_6, 4_6, 3_4, 3_5, 4_0, 3_3, 4_5, 6_4,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_0, 3_4, 3_1,
7_6, 3_4, 2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_9,
4_1, 2_9, 3_7, 3_1, 3_0, 7_6, 4_6, 3_4, 3_1, 3_9, 6_4, 7_6,
2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_4, 3_1, 2_7, 4_4,
4_6, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_2, 3_1, 3_0, 6_6, 7_8,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6,
4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_2, 3_1, 3_0, 3_1, 4_5,
4_6, 2_7, 3_8, 6_4, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 4_9,
4_1, 4_4, 3_0, 4_5, 7_6, 2_7, 4_2, 4_2, 3_1, 2_7, 4_4, 6_5,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_3, 5_1, 7_6,
4_0, 2_7, 3_9, 3_1, 7_6, 3_5, 4_5, 7_6, 1_5, 5_2, 5_1, 3_9,
2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_6, 1_1, 3_5, 4_0, 3_3,
7_6, 4_1, 3_2, 7_6, 1_1, 3_5, 4_0, 3_3, 4_5, 6_6, 7_8, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_2, 4_1, 4_1, 3_7, 7_6,
4_1, 4_0, 7_6, 3_9, 5_1, 7_6, 2_3, 4_1, 4_4, 3_7, 4_5, 6_4,
7_6, 5_1, 3_1, 7_6, 1_3, 3_5, 3_3, 3_4, 4_6, 5_1, 6_4, 7_6,
2_7, 4_0, 3_0, 7_6, 3_0, 3_1, 4_5, 4_2, 2_7, 3_5, 4_4, 6_7,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_4, 4_1, 4_6,
3_4, 3_5, 4_0, 3_3, 7_6, 2_8, 3_1, 4_5, 3_5, 3_0, 3_1, 7_6,
4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3, 7_6, 1_8, 4_1, 4_7,
4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_0, 3_1, 2_9, 2_7, 5_1,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_5, 3_2, 7_6,
4_6, 3_4, 2_7, 4_6, 7_6, 2_9, 4_1, 3_8, 4_1, 4_5, 4_5, 2_7,
3_8, 7_6, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4, 7_6, 2_8, 4_1, 4_7,
4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_6, 2_7, 4_0, 3_0, 7_6, 2_8,
2_7, 4_4, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
2_0, 3_4, 3_1, 7_6, 3_8, 4_1, 4_0, 3_1, 7_6, 2_7, 4_0, 3_0,
7_6, 3_8, 3_1, 4_8, 3_1, 3_8, 7_6, 4_5, 2_7, 4_0, 3_0, 4_5,
7_6, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4, 7_6, 3_2, 2_7, 4_4,
7_6, 2_7, 4_9, 2_7, 5_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
7_6, 7_6]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def _a(self):
    """Check the 5b-lyrics Jukebox tokenizer against known-good token ids for ``self.metas``."""
    import torch

    # NOTE(review): the source bound every local to the same mangled placeholder
    # while later lines read ``tokenizer``/``tokens``/``EXPECTED_OUTPUT`` —
    # consistent names restored so the assertions can actually run.
    tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-5b-lyrics')
    tokens = tokenizer(**self.metas)['input_ids']
    # fmt: off
    EXPECTED_OUTPUT = [
        torch.tensor([[
            0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1, 9, 7_7, 3_9,
            3_1, 4_6, 7_7, 2_7, 7_7, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8,
            3_1, 4_4, 7_7, 3_2, 4_4, 4_1, 3_9, 7_7, 2_7, 4_0, 7_7, 2_7,
            4_0, 4_6, 3_5, 4_3, 4_7, 3_1, 7_7, 3_8, 2_7, 4_0, 3_0, 6_4,
            7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 2_3, 3_4, 4_1,
            7_7, 4_5, 2_7, 3_5, 3_0, 7_7, 7_2, 2_0, 4_9, 4_1, 7_7, 4_8,
            2_7, 4_5, 4_6, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 4_4, 4_7, 4_0,
            3_7, 3_8, 3_1, 4_5, 4_5, 7_7, 3_8, 3_1, 3_3, 4_5, 7_7, 4_1,
            3_2, 7_7, 4_5, 4_6, 4_1, 4_0, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
            7_7, 7_7, 7_7, 7_7, 1_9, 4_6, 2_7, 4_0, 3_0, 7_7, 3_5, 4_0,
            7_7, 4_6, 3_4, 3_1, 7_7, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3,
            7_7, 6_3, 7_7, 6_3, 7_7, 6_3, 7_7, 1_4, 3_1, 2_7, 4_4, 7_7,
            4_6, 3_4, 3_1, 3_9, 6_4, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1,
            7_7, 4_5, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7,
            7_7, 7_7, 7_7, 8, 2_7, 3_8, 3_2, 7_7, 4_5, 4_7, 4_0, 3_7,
            7_7, 2_7, 7_7, 4_5, 3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0,
            7_7, 4_8, 3_5, 4_5, 2_7, 3_3, 3_1, 7_7, 3_8, 3_5, 3_1, 4_5,
            6_4, 7_7, 4_9, 3_4, 4_1, 4_5, 3_1, 7_7, 3_2, 4_4, 4_1, 4_9,
            4_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1,
            4_0, 3_0, 7_7, 4_9, 4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_7,
            3_8, 3_5, 4_2, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_5, 4_0, 3_1,
            3_1, 4_4, 7_7, 4_1, 3_2, 7_7, 2_9, 4_1, 3_8, 3_0, 7_7, 2_9,
            4_1, 3_9, 3_9, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7,
            7_7, 7_7, 7_7, 7_7, 2_0, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 2_7,
            4_6, 7_7, 3_5, 4_6, 4_5, 7_7, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6,
            4_1, 4_4, 7_7, 4_9, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 4_1, 4_5,
            3_1, 7_7, 4_2, 2_7, 4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_7, 4_4,
            3_1, 2_7, 3_0, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
            2_3, 3_4, 3_5, 2_9, 3_4, 7_7, 5_1, 3_1, 4_6, 7_7, 4_5, 4_7,
            4_4, 4_8, 3_5, 4_8, 3_1, 6_4, 7_7, 4_5, 4_6, 2_7, 3_9, 4_2,
            3_1, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 4_5, 3_1, 7_7,
            3_8, 3_5, 3_2, 3_1, 3_8, 3_1, 4_5, 4_5, 7_7, 4_6, 3_4, 3_5,
            4_0, 3_3, 4_5, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
            7_7, 2_0, 3_4, 3_1, 7_7, 3_4, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4,
            2_7, 4_6, 7_7, 3_9, 4_1, 2_9, 3_7, 3_1, 3_0, 7_7, 4_6, 3_4,
            3_1, 3_9, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7,
            3_4, 3_1, 2_7, 4_4, 4_6, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 3_2,
            3_1, 3_0, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
            1, 4_0, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 7_7, 4_2,
            3_1, 3_0, 3_1, 4_5, 4_6, 2_7, 3_8, 6_4, 7_7, 4_6, 3_4, 3_1,
            4_5, 3_1, 7_7, 4_9, 4_1, 4_4, 3_0, 4_5, 7_7, 2_7, 4_2, 4_2,
            3_1, 2_7, 4_4, 6_5, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
            7_7, 1_3, 5_1, 7_7, 4_0, 2_7, 3_9, 3_1, 7_7, 3_5, 4_5, 7_7,
            1_5, 5_2, 5_1, 3_9, 2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_7,
            1_1, 3_5, 4_0, 3_3, 7_7, 4_1, 3_2, 7_7, 1_1, 3_5, 4_0, 3_3,
            4_5, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1_2,
            4_1, 4_1, 3_7, 7_7, 4_1, 4_0, 7_7, 3_9, 5_1, 7_7, 2_3, 4_1,
            4_4, 3_7, 4_5, 6_4, 7_7, 5_1, 3_1, 7_7, 1_3, 3_5, 3_3, 3_4,
            4_6, 5_1, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 3_0, 3_1, 4_5, 4_2,
            2_7, 3_5, 4_4, 6_7, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
            7_7, 1_4, 4_1, 4_6, 3_4, 3_5, 4_0, 3_3, 7_7, 2_8, 3_1, 4_5,
            3_5, 3_0, 3_1, 7_7, 4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3,
            7_7, 1_8, 4_1, 4_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7, 3_0,
            3_1, 2_9, 2_7, 5_1, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
            7_7, 1_5, 3_2, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 2_9, 4_1, 3_8,
            4_1, 4_5, 4_5, 2_7, 3_8, 7_7, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4,
            7_7, 2_8, 4_1, 4_7, 4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_7, 2_7,
            4_0, 3_0, 7_7, 2_8, 2_7, 4_4, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
            7_7, 7_7, 7_7, 7_7, 2_0, 3_4, 3_1, 7_7, 3_8, 4_1, 4_0, 3_1,
            7_7, 2_7, 4_0, 3_0, 7_7, 3_8, 3_1, 4_8, 3_1, 3_8, 7_7, 4_5,
            2_7, 4_0, 3_0, 4_5, 7_7, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4,
            7_7, 3_2, 2_7, 4_4, 7_7, 2_7, 4_9, 2_7, 5_1, 7_9, 7_7, 7_7,
            7_7, 7_7, 7_7, 7_7, 7_7, 7_7]]),
        torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]]),
        torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]]),
    ]
    # fmt: on
    self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
    self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
    self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def get_swinva_config(swinva_name):
    """Build a SwinvaConfig matching the architecture encoded in a timm swinv2 name.

    A name such as ``swinv2_tiny_patch4_window8_256`` encodes the model size
    (``name_split[1]``), the window size (``name_split[2]``) and the input
    resolution (``name_split[3]``).
    """
    # NOTE(review): the source bound every value to one mangled placeholder name
    # while later lines read ``config``/``name_split``/``img_size``/... —
    # consistent names (and the defs' callers' names) restored throughout.
    config = SwinvaConfig()
    name_split = swinva_name.split("_")

    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:])
    else:
        img_size = int(name_split[3])
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:])
    else:
        window_size = int(name_split[2][6:])  # drop the leading "window" prefix

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:  # large
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "to" in swinva_name:
        # Assignment target was lost in the source; restored as
        # ``pretrained_window_sizes`` per the upstream conversion script — TODO confirm.
        config.pretrained_window_sizes = (12, 12, 12, 6)

    if ("22k" in swinva_name) and ("to" not in swinva_name):
        num_classes = 21841
        repo_id = "huggingface/label-files"
        filename = "imagenet-22k-id2label.json"
        idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size
    return config


def rename_key(name):
    """Map a timm swinv2 state-dict key to its HF Swinv2 equivalent."""
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    # "attn.proj" must be handled before the generic "attn" rewrite.
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swinv2." + name
    return name


def convert_state_dict(orig_state_dict, model):
    """Rewrite a timm state dict in place: split fused qkv tensors and rename keys.

    ``mask``-related buffers are dropped; fused ``qkv`` weights/biases are split
    into separate query/key/value entries sized by the layer's ``all_head_size``.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            # Target keys were lost in the source; reconstructed per the
            # upstream conversion script — TODO confirm against a checkpoint.
            prefix = f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict


def convert_swinva_checkpoint(swinva_name, pytorch_dump_folder_path):
    """Convert a timm swinv2 checkpoint to HF format, verify the logits match, save and push."""
    timm_model = timm.create_model(swinva_name, pretrained=True)
    timm_model.eval()

    config = get_swinva_config(swinva_name)
    model = SwinvaForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinva_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits
    # Sanity check: the ported model must reproduce the timm logits.
    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swinva_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinva_name),
        organization="nandwalritik",
        commit_message="Add model",
    )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swinv2_name",
        default="swinv2_tiny_patch4_window8_256",
        type=str,
        help="Name of the Swinv2 timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_swinva_checkpoint(args.swinv2_name, args.pytorch_dump_folder_path)
| 628 | 0 |
'''simple docstring'''
import numpy as np
class _lowerCAmelCase:
    """Compute common remote-sensing vegetation indices from per-band reflectance values.

    Bands (red, green, blue, red_edge, nir) may be scalars or numpy arrays; each
    index method combines the stored bands arithmetically.
    """

    # NOTE(review): the source declared every parameter/method with one mangled
    # placeholder name (duplicate parameters are a SyntaxError, duplicate methods
    # shadow each other).  Parameter names are restored from the keyword calls to
    # ``set_matricies`` and method names from the dispatch dict in ``calculation``.

    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        """Store any band that was provided; bands passed as None keep their previous value."""
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        """Update any provided bands, then dispatch to the index named by *index*.

        Returns the computed value, or False (after printing a message) when the
        index name is unknown.
        """
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            "ARVI2": self.arvaa,
            "CCCI": self.ccci,
            "CVI": self.cvi,
            "GLI": self.gli,
            "NDVI": self.ndvi,
            "BNDVI": self.bndvi,
            "redEdgeNDVI": self.red_edge_ndvi,
            "GNDVI": self.gndvi,
            "GBNDVI": self.gbndvi,
            "GRNDVI": self.grndvi,
            "RBNDVI": self.rbndvi,
            "PNDVI": self.pndvi,
            "ATSAVI": self.atsavi,
            "BWDRVI": self.bwdrvi,
            "CIgreen": self.ci_green,
            "CIrededge": self.ci_rededge,
            "CI": self.ci,
            "CTVI": self.ctvi,
            "GDVI": self.gdvi,
            "EVI": self.evi,
            "GEMI": self.gemi,
            "GOSAVI": self.gosavi,
            "GSAVI": self.gsavi,
            "Hue": self.hue,
            "IVI": self.ivi,
            "IPVI": self.ipvi,
            "I": self.i,
            "RVI": self.rvi,
            "MRVI": self.mrvi,
            "MSAVI": self.m_savi,
            "NormG": self.norm_g,
            "NormNIR": self.norm_nir,
            "NormR": self.norm_r,
            "NGRDI": self.ngrdi,
            "RI": self.ri,
            "S": self.s,
            "IF": self._if,
            "DVI": self.dvi,
            "TVI": self.tvi,
            "NDRE": self.ndre,
        }
        try:
            return funcs[index]()
        except KeyError:
            print("Index not in the list!")
            return False

    def arvaa(self):
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, x=0.08, a=1.22, b=0.03):
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        return (self.nir / self.redEdge) - 1

    def ci(self):
        return (self.red - self.blue) / self.red

    def ctvi(self):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        return self.nir - self.green

    def evi(self):
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)
        )

    def ivi(self, a=None, b=None):
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        return self.nir / self.red

    def mrvi(self):
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        return self.nir / self.red

    def tvi(self):
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
| 719 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class _lowerCAmelCase(TaskTemplate):
    """Task template describing an audio-classification dataset (audio input, ClassLabel target)."""

    # NOTE(review): base class restored to ``TaskTemplate`` (the only template base
    # imported by this module) and field/method names restored from their read
    # sites and the HF ``datasets`` task-template API — confirm against upstream.
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of this template whose label schema uses the dataset's ClassLabel.

        Raises ValueError when the label column is missing or is not a ClassLabel.
        """
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # The dataclass is frozen, so write through __dict__ to bypass __setattr__.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Map dataset column names to the template's canonical "audio"/"labels" names."""
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
| 385 | 0 |
from scipy.stats import pearsonr
import datasets
# NOTE(review): the three module constants below were all assigned to one mangled
# name while the decorator/class read ``_DESCRIPTION``/``_KWARGS_DESCRIPTION``/
# ``_CITATION`` — restored; string contents are unchanged.
_DESCRIPTION = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`list` of `int`): Predicted class labels, as returned by a model.\n    references (`list` of `int`): Ground truth labels.\n    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n    p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n    Example 1-A simple example using only predictions and references.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n\n    Example 2-The same as Example 1, but that also returns the `p-value`.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n        >>> print(sorted(list(results.keys())))\n        [\'p-value\', \'pearsonr\']\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n        >>> print(round(results[\'p-value\'], 2))\n        0.15\n'
_CITATION = '\n@article{2020SciPy-NMeth,\nauthor  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n           Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n           Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n           Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n           Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n           Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n           Kern, Robert and Larson, Eric and Carey, C J and\n           Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n           {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n           Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n           Harris, Charles R. and Archibald, Anne M. and\n           Ribeiro, Antonio H. and Pedregosa, Fabian and\n           {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n           Computing in Python}},\njournal = {Nature Methods},\nyear    = {2020},\nvolume  = {17},\npages   = {261--272},\nadsurl  = {https://rdcu.be/b08Wh},\ndoi     = {10.1038/s41592-019-0686-2},\n}\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class UpperCamelCase_(datasets.Metric):
    """``datasets`` metric wrapping ``scipy.stats.pearsonr``."""

    # NOTE(review): both methods were declared with the same mangled name (the
    # second silently shadowed the first); restored to the ``datasets.Metric``
    # hook names ``_info`` and ``_compute``.
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        """Return the Pearson r (and optionally the p-value) for the two series."""
        # Argument order was lost in the source; Pearson's r is symmetric in its
        # two inputs, so either order yields the same statistic.
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
'''simple docstring'''
from __future__ import annotations
def _a(number_of_bytes: int, partitions: int) -> list[str]:
    """Split *number_of_bytes* into *partitions* contiguous 1-indexed byte ranges.

    Each range is rendered as ``"start-end"``; the final partition absorbs any
    remainder so the last range always ends at *number_of_bytes*.

    Raises:
        ValueError: if *partitions* is not positive or exceeds *number_of_bytes*.
    """
    # NOTE(review): the source declared both parameters with the same mangled
    # name (a SyntaxError); names restored from the reads in the body.
    if partitions <= 0:
        raise ValueError('partitions must be a positive number!')
    if partitions > number_of_bytes:
        raise ValueError('partitions can not > number_of_bytes!')
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f'{start_bytes}-{end_bytes}')
    return allocation_list


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 56 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class __UpperCamelCase(unittest.TestCase):
    """Tests for TvltProcessor: save/load round-trip and parity with its two sub-processors."""

    # NOTE(review): every method was declared with the same mangled name (so only
    # the last survived) and attribute assignments lost their targets; names
    # restored from the read sites (``self.checkpoint``, ``self.get_image_processor``, ...).
    def setUp(self):
        self.checkpoint = 'ZinengTang/tvlt-base'
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)

        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([1_20_00])
        audio_dict = feature_extractor(audio, return_tensors='np')
        input_processor = processor(audio=audio, return_tensors='np')
        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1E-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        image = np.ones([3, 2_24, 2_24])
        image_dict = image_processor(image, return_tensors='np')
        input_processor = processor(images=image, return_tensors='np')
        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1E-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([1_20_00])
        image = np.ones([3, 2_24, 2_24])
        inputs = processor(audio=audio, images=image)
        self.assertListEqual(list(inputs.keys()), ['audio_values', 'audio_mask', 'pixel_values', 'pixel_mask'])

        # test if it raises when no input is passed
        # (exception type was lost in the source; ValueError per upstream — confirm)
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg='`processor` and `image_processor`+`feature_extractor` model input names do not match',
        )
| 33 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __UpperCamelCase(OnnxPipelineTesterMixin, unittest.TestCase):
    """Fast tests for OnnxStableDiffusionImgaImgPipeline with each supported scheduler."""

    # NOTE(review): base mixin restored to ``OnnxPipelineTesterMixin`` (the only
    # mixin imported by this module) and the class attribute restored from the
    # ``self.hub_checkpoint`` reads; duplicate mangled method names renamed per
    # the upstream diffusers test file — confirm.
    hub_checkpoint = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'

    def get_dummy_inputs(self, seed=0):
        """Build a small deterministic img2img input dict for the tiny test pipeline."""
        image = floats_tensor((1, 3, 1_28, 1_28), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'generator': generator,
            'num_inference_steps': 3,
            'strength': 0.75,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs

    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 1_28, 1_28, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1E-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 1_28, 1_28, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 1_28, 1_28, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 1_28, 1_28, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 1_28, 1_28, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 1_28, 1_28, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
    """Nightly GPU integration tests for the ONNX Stable Diffusion img2img pipeline."""

    @property
    def gpu_provider(self):
        # ONNX Runtime CUDA execution provider with a 15 GB arena cap.
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        # Disable the memory-pattern optimization for reproducible session behaviour.
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        """Full-resolution img2img run with the default (PNDM) scheduler."""
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        """Full-resolution img2img run with the LMS discrete scheduler."""
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 33 | 1 |
'''Tests for the TextToVideoSDPipeline (fast dummy-component tests and slow integration tests).'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    TextToVideoSDPipeline,
    UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
# Make all torch ops deterministic so the pixel-slice assertions below are stable.
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for TextToVideoSDPipeline built from tiny dummy components."""

    # Attributes consumed by PipelineTesterMixin.
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        """Build a tiny, seeded set of pipeline components for fast tests."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic call kwargs for the pipeline on the given device."""
        if str(device).startswith("mps"):
            # MPS does not support per-device generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    """Slow GPU integration tests against the real damo-vilab text-to-video checkpoint."""

    def test_full_model(self):
        """25-step generation with the DPM-Solver multistep scheduler vs a stored reference video."""
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")
        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)
        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        """2-step generation with the default scheduler vs a stored reference video."""
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")
        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)
        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5e-2
| 689 |
'''Convert a fairseq UniSpeechSat checkpoint to the Hugging Face Transformers format.'''
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
_a : List[Any] = logging.get_logger(__name__)
# fairseq parameter-name prefix -> HF module path; "*" is replaced by the layer index.
# NOTE(review): both constants below are bound to `_a` (each assignment shadows the
# previous one) yet later code reads them as `MAPPING` and `TOP_LEVEL_KEYS` — the
# names look machine-mangled; verify against the upstream conversion script.
_a : int = {
    """post_extract_proj""": """feature_projection.projection""",
    """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
    """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
    """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
    """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
    """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
    """self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
    """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
    """fc2""": """encoder.layers.*.feed_forward.output_dense""",
    """final_layer_norm""": """encoder.layers.*.final_layer_norm""",
    """encoder.layer_norm""": """encoder.layer_norm""",
    """encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
    """w2v_model.layer_norm""": """feature_projection.layer_norm""",
    """quantizer.weight_proj""": """quantizer.weight_proj""",
    """quantizer.vars""": """quantizer.codevectors""",
    """project_q""": """project_q""",
    """final_proj""": """project_hid""",
    """w2v_encoder.proj""": """lm_head""",
    """label_embs_concat""": """label_embeddings_concat""",
    """mask_emb""": """masked_spec_embed""",
    """spk_proj""": """speaker_proj""",
}
# Keys that live at the top level of the HF model (no "unispeech_sat." prefix).
_a : Any = [
    """lm_head""",
    """quantizer.weight_proj""",
    """quantizer.codevectors""",
    """project_q""",
    """project_hid""",
    """label_embeddings_concat""",
    """speaker_proj""",
    """layer_norm_for_extract""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Assign `value` to the HF module attribute addressed by dotted `key`.

    Args:
        hf_pointer: root HF module to walk into.
        key: dotted attribute path, e.g. "encoder.layers.0.attention.k_proj".
        value: tensor to copy in; its shape must match the destination.
        full_name: original fairseq parameter name (for error/log messages).
        weight_type: one of "weight"/"weight_g"/"weight_v"/"bias" or None for a raw tensor.

    Raises:
        ValueError: if the destination shape does not match `value.shape`.
    """
    # Walk the attribute chain down from the root module.
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f' {value.shape} for {full_name}'
        )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model):
    """Copy every fairseq parameter into the HF UniSpeechSat model.

    Conv feature-extractor weights are routed through `load_conv_layer`;
    everything else is matched against MAPPING and set via `set_recursively`.
    Unmatched parameters are collected and logged as a warning.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        # Recover the layer index from the fairseq name.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f'Unused weights: {unused_weights}')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one conv feature-extractor parameter into the HF feature extractor.

    The fairseq name encodes "<layer_id>.<type_id>....": type 0 is the conv
    itself, type 2 is the layer norm (only present on all layers without group
    norm, or on layer 0 with it). Anything else is recorded in `unused_weights`.

    Raises:
        ValueError: if the stored tensor shape does not match the destination.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.'
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """Load a fairseq UniSpeechSat checkpoint, copy its weights into an HF model, and save it.

    Args:
        checkpoint_path: path to the fairseq checkpoint.
        pytorch_dump_folder_path: output folder for the converted HF model.
        config_path: optional path to an HF config.json; defaults to a fresh config.
        dict_path: unused (overwritten below); kept for CLI compatibility.
        is_finetuned: build a CTC head (True) or a pre-training head (False).
    """
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()
    dict_path = ""
    if is_finetuned:
        hf_wavavec = UniSpeechSatForCTC(config)
    else:
        hf_wavavec = UniSpeechSatForPreTraining(config)
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()
    recursively_load_weights(model, hf_wavavec)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: parse conversion arguments and run the checkpoint conversion.
    # NOTE(review): the parser is bound to `_a` but used below as `parser`, and the
    # parsed namespace is also bound to `_a` yet read as `args` — the variable names
    # look machine-mangled; verify against the upstream conversion script.
    _a : List[str] = argparse.ArgumentParser()
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
    parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    parser.add_argument(
        """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
    )
    _a : Union[str, Any] = parser.parse_args()
    convert_unispeech_sat_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 689 | 1 |
'''Lazy-import shim for the LiLT model, following the standard transformers package layout.'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Map of submodule -> public names it exports; consumed by _LazyModule below.
lowercase = {
    '''configuration_lilt''': ['''LILT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LiltConfig'''],
}
# Modeling classes are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase = [
        '''LILT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''LiltForQuestionAnswering''',
        '''LiltForSequenceClassification''',
        '''LiltForTokenClassification''',
        '''LiltModel''',
        '''LiltPreTrainedModel''',
    ]
# Under static type checking, import everything eagerly so annotations resolve.
if TYPE_CHECKING:
    from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_lilt import (
            LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
            LiltForQuestionAnswering,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltModel,
            LiltPreTrainedModel,
        )
# At runtime, replace this module with a lazy loader that imports on first attribute access.
else:
    import sys
    lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 564 |
"""Sum of 2*a*floor((a-1)/2) for a in [3, n] (maximum remainder sum)."""


def solution(n: int = 1_0_0_0) -> int:
    """Return the sum of 2 * a * ((a - 1) // 2) over a = 3 .. n.

    2*a*floor((a-1)/2) is the maximum of ((a-1)^k + (a+1)^k) mod a^2 over k.
    """
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))


if __name__ == "__main__":
    print(solution())
| 564 | 1 |
'''Bilateral filter demo: edge-preserving smoothing via spatial and intensity Gaussians (OpenCV I/O).'''
import math
import sys
import cva
import numpy as np
def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    """Apply a zero-mean Gaussian of the given variance to each element of `img`.

    Returns (1 / (sigma * sqrt(2*pi))) * exp(-(x/sigma)^2 / 2) elementwise.
    """
    # For applying gaussian function for each element in matrix.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)
def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    """Return the kernel_size x kernel_size window of `img` centred at (x, y)."""
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]
def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    """Create a kernel_size x kernel_size spatial Gaussian kernel.

    Each cell first holds its Euclidean distance from the kernel centre, then
    the Gaussian of the given variance is applied elementwise.
    """
    # Creates a gaussian kernel of given dimension.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)
def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    """Edge-preserving bilateral filter over a 2-D grayscale image.

    Each output pixel is a weighted mean of its window, where the weight is the
    product of a spatial Gaussian (distance from centre) and an intensity
    Gaussian (difference from the centre pixel). Border pixels (within
    kernel_size // 2 of the edge) are left as zero.
    """
    filtered = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            # Intensity differences relative to the window centre.
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            filtered[i, j] = val
    return filtered
def parse_args(args: list) -> tuple:
    """Parse CLI arguments [prog, filename, spatial_variance, intensity_variance, kernel_size].

    Missing values fall back to defaults; the kernel size is forced to be odd
    so the window has a well-defined centre pixel.
    """
    filename = args[1] if args[1:] else '../image_data/lena.jpg'
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        # Even sizes are bumped to the next odd number; odd sizes are unchanged.
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
    # Load the image as grayscale, normalize to [0, 1], filter, and display input/output.
    __A , __A , __A , __A : Optional[Any] = parse_args(sys.argv)
    # NOTE(review): the parsed values are bound to `__A` but read below as
    # `filename`/`spatial_variance`/`intensity_variance`/`kernel_size` — the
    # variable names look machine-mangled; verify before running.
    __A : Dict = cva.imread(filename, 0)
    cva.imshow('input image', img)
    __A : Tuple = img / 2_55
    __A : List[str] = out.astype('float32')
    __A : Optional[int] = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    __A : Optional[Any] = out * 2_55
    __A : Tuple = np.uinta(out)
    cva.imshow('output image', out)
    cva.waitKey(0)
    cva.destroyAllWindows()
| 394 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    """Output of the Flax ControlNet forward pass.

    Attributes:
        down_block_res_samples: residual features from each down block.
        mid_block_res_sample: residual features from the mid block.
    """

    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding(nn.Module):
    """Embeds the conditioning image into feature space for the ControlNet.

    A conv-in layer, a stack of alternating stride-1 / stride-2 convs over
    `block_out_channels`, and a zero-initialized conv-out projecting to
    `conditioning_embedding_channels`.
    """

    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv_in = nn.Conv(
            self.block_out_channels[0],
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )
        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv_a = nn.Conv(
                channel_in,
                kernel_size=(3, 3),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv_a)
            # Stride-2 conv halves the spatial resolution between channel steps.
            conv_a = nn.Conv(
                channel_out,
                kernel_size=(3, 3),
                strides=(2, 2),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv_a)
        self.blocks = blocks
        # Zero init so the ControlNet starts as a no-op contribution.
        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)
        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)
        embedding = self.conv_out(embedding)
        return embedding
@flax_register_to_config
class _UpperCamelCase ( nn.Module , _A , _A ):
    """Flax ControlNet: a conditioned UNet encoder that emits per-down-block
    and mid-block residuals scaled by `conditioning_scale`.

    NOTE(review): names in this class look machine-mangled. Every config field
    below is bound to the same name SCREAMING_SNAKE_CASE (later assignments
    shadow earlier ones), both `lowercase__` methods shadow each other, the
    bases `_A , _A` are undefined at module level, and method bodies read names
    (`params_rng`, `timesteps`, `channel_order`, ...) that are never bound —
    presumably the original field/method names (sample_size, in_channels,
    init_weights, setup, ...). Verify against the upstream source before use.
    """
    SCREAMING_SNAKE_CASE:int = 32
    SCREAMING_SNAKE_CASE:int = 4
    SCREAMING_SNAKE_CASE:Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    SCREAMING_SNAKE_CASE:Union[bool, Tuple[bool]] = False
    SCREAMING_SNAKE_CASE:Tuple[int] = (320, 640, 1280, 1280)
    SCREAMING_SNAKE_CASE:int = 2
    SCREAMING_SNAKE_CASE:Union[int, Tuple[int]] = 8
    SCREAMING_SNAKE_CASE:Optional[Union[int, Tuple[int]]] = None
    SCREAMING_SNAKE_CASE:int = 1280
    SCREAMING_SNAKE_CASE:float = 0.0
    SCREAMING_SNAKE_CASE:bool = False
    SCREAMING_SNAKE_CASE:jnp.dtype = jnp.floataa
    SCREAMING_SNAKE_CASE:bool = True
    SCREAMING_SNAKE_CASE:int = 0
    SCREAMING_SNAKE_CASE:str = "rgb"
    SCREAMING_SNAKE_CASE:Tuple[int] = (16, 32, 96, 256)
    def lowercase__ ( self , _a ):
        """Initialize module parameters from an RNG key by tracing a dummy forward pass."""
        # init input tensors
        a__ = (1, self.in_channels, self.sample_size, self.sample_size)
        a__ = jnp.zeros(_a , dtype=jnp.floataa )
        a__ = jnp.ones((1,) , dtype=jnp.intaa )
        a__ = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
        # Conditioning image is 8x the latent resolution, 3 channels.
        a__ = (1, 3, self.sample_size * 8, self.sample_size * 8)
        a__ = jnp.zeros(_a , dtype=jnp.floataa )
        a__ , a__ = jax.random.split(_a )
        # NOTE(review): `params_rng`/`dropout_rng` are read here but never bound above.
        a__ = {'params': params_rng, 'dropout': dropout_rng}
        return self.init(_a , _a , _a , _a , _a )["params"]
    def lowercase__ ( self ):
        """Build conv-in, time embedding, conditioning embedding, down blocks, mid
        block, and the zero-initialized ControlNet projection convolutions."""
        a__ = self.block_out_channels
        a__ = block_out_channels[0] * 4
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        a__ = self.num_attention_heads or self.attention_head_dim
        # input
        a__ = nn.Conv(
            block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        # time
        a__ = FlaxTimesteps(
            block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
        a__ = FlaxTimestepEmbedding(_a , dtype=self.dtype )
        a__ = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
        a__ = self.only_cross_attention
        # Broadcast scalar settings to one entry per down block.
        if isinstance(_a , _a ):
            a__ = (only_cross_attention,) * len(self.down_block_types )
        if isinstance(_a , _a ):
            a__ = (num_attention_heads,) * len(self.down_block_types )
        # down
        a__ = []
        a__ = []
        a__ = block_out_channels[0]
        # Zero-initialized 1x1 projection so the ControlNet starts as a no-op.
        a__ = nn.Conv(
            _a , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
        controlnet_down_blocks.append(_a )
        for i, down_block_type in enumerate(self.down_block_types ):
            a__ = output_channel
            a__ = block_out_channels[i]
            a__ = i == len(_a ) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                a__ = FlaxCrossAttnDownBlockaD(
                    in_channels=_a , out_channels=_a , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
            else:
                a__ = FlaxDownBlockaD(
                    in_channels=_a , out_channels=_a , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
            down_blocks.append(_a )
            # One zero-initialized projection per resnet layer in the block.
            for _ in range(self.layers_per_block ):
                a__ = nn.Conv(
                    _a , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(_a )
            # Extra projection for the downsampler of every non-final block.
            if not is_final_block:
                a__ = nn.Conv(
                    _a , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(_a )
        a__ = down_blocks
        a__ = controlnet_down_blocks
        # mid
        a__ = block_out_channels[-1]
        a__ = FlaxUNetMidBlockaDCrossAttn(
            in_channels=_a , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
        a__ = nn.Conv(
            _a , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
    def __call__( self , _a , _a , _a , _a , _a = 1.0 , _a = True , _a = False , ):
        """Forward pass.

        Positional parameters appear to be (sample, timesteps,
        encoder_hidden_states, controlnet_cond, conditioning_scale=1.0,
        return_dict=True, train=False), based on the names read in the body —
        TODO confirm against upstream.
        """
        a__ = self.controlnet_conditioning_channel_order
        # Flip the conditioning image channels if the checkpoint expects BGR.
        if channel_order == "bgr":
            a__ = jnp.flip(_a , axis=1 )
        # 1. time
        if not isinstance(_a , jnp.ndarray ):
            a__ = jnp.array([timesteps] , dtype=jnp.intaa )
        elif isinstance(_a , jnp.ndarray ) and len(timesteps.shape ) == 0:
            a__ = timesteps.astype(dtype=jnp.floataa )
            a__ = jnp.expand_dims(_a , 0 )
        a__ = self.time_proj(_a )
        a__ = self.time_embedding(_a )
        # 2. pre-process
        # NCHW -> NHWC for Flax convolutions.
        a__ = jnp.transpose(_a , (0, 2, 3, 1) )
        a__ = self.conv_in(_a )
        a__ = jnp.transpose(_a , (0, 2, 3, 1) )
        a__ = self.controlnet_cond_embedding(_a )
        sample += controlnet_cond
        # 3. down
        a__ = (sample,)
        for down_block in self.down_blocks:
            if isinstance(_a , _a ):
                a__ , a__ = down_block(_a , _a , _a , deterministic=not train )
            else:
                a__ , a__ = down_block(_a , _a , deterministic=not train )
            down_block_res_samples += res_samples
        # 4. mid
        a__ = self.mid_block(_a , _a , _a , deterministic=not train )
        # 5. contronet blocks
        a__ = ()
        for down_block_res_sample, controlnet_block in zip(_a , self.controlnet_down_blocks ):
            a__ = controlnet_block(_a )
            controlnet_down_block_res_samples += (down_block_res_sample,)
        a__ = controlnet_down_block_res_samples
        a__ = self.controlnet_mid_block(_a )
        # 6. scaling
        a__ = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale
        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)
        return FlaxControlNetOutput(
            down_block_res_samples=_a , mid_block_res_sample=_a )
def binomial_coefficient(n, k):
    """Return C(n, k) via the multiplicative formula with exact integer division."""
    result = 1  # accumulates the product incrementally
    # Since C(n, k) = C(n, n-k), use the smaller k to shorten the loop.
    if k > (n - k):
        k = n - k
    # Calculate C(n, k); each intermediate division is exact.
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result
def catalan_number(node_count):
    """Return the node_count-th Catalan number: C(2n, n) // (n + 1)."""
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)
def factorial(n):
    """Return n! for n >= 0.

    Raises:
        ValueError: if n is negative.
    """
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result
def binary_tree_count(node_count):
    """Return the number of binary trees on node_count labeled nodes: Catalan(n) * n!."""
    return catalan_number(node_count) * factorial(node_count)
if __name__ == "__main__":
    # Interactive entry point: read the node count and print tree counts.
    # NOTE(review): the input is bound to `_snake_case` but read below as
    # `node_count` — the variable name looks machine-mangled; verify.
    _snake_case = int(input("Enter the number of nodes: ").strip() or 0)
    if node_count <= 0:
        raise ValueError("We need some nodes to work with.")
    print(
        f'''Given {node_count} nodes, there are {binary_tree_count(node_count)} '''
        f'''binary trees and {catalan_number(node_count)} binary search trees.'''
    )
| 710 |
def combination_sum_iv(n, array, target):
    """Count ordered selections (with repetition) from `array` summing to `target`.

    `n` (the length of `array`) is unused here but kept for interface
    compatibility with the DP variants. Plain exponential recursion.
    """

    def count_of_possible_combinations(target) -> int:
        # Overshot: this branch contributes nothing.
        if target < 0:
            return 0
        # Exact hit: exactly one way (the empty continuation).
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)
def combination_sum_iv_dp_array(n, array, target):
    """Memoized (top-down) count of ordered selections from `array` summing to `target`.

    `n` is unused but kept for interface compatibility. dp_array[t] caches the
    count for subtarget t; -1 marks "not computed yet".
    """

    def count_of_possible_combinations_with_dp_array(target, dp_array) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)
def combination_sum_iv_bottom_up(n, array, target):
    """Iterative bottom-up count of ordered selections from `array` summing to `target`.

    `n` is the number of candidate values in `array`. dp_array[i] holds the
    count for subtarget i; the base case dp_array[0] = 1 is the empty selection.
    """
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # Demo run on the classic example (expected count: 9).
    # NOTE(review): values are bound to `_snake_case` but read below as
    # `n`/`target`/`array` — the variable names look machine-mangled; verify.
    _snake_case = 3
    _snake_case = 5
    _snake_case = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
| 658 | 0 |
'''A* pathfinding on a small 2-D grid (Manhattan or Euclidean heuristic).'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
# NOTE(review): the constants below are all bound to `_UpperCamelCase` (each
# assignment shadows the previous) yet later code reads them as
# HEURISTIC / grid / delta / TPosition — names look machine-mangled; verify.
_UpperCamelCase : Optional[Any] =0
_UpperCamelCase : List[Any] =[
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]
_UpperCamelCase : List[str] =[[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
_UpperCamelCase : Union[str, Any] =tuple[int, int]
class Node:
    """A search-tree node holding position, goal, path cost and heuristic.

    f_cost = g_cost (steps from start) + h_cost (heuristic distance to goal);
    nodes order by f_cost so a sorted open list pops the most promising first.
    """

    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        # Position stored (row, col) = (y, x) to match grid indexing.
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        """Distance to the goal: Manhattan if HEURISTIC == 1, else Euclidean."""
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other) -> bool:
        # Ordering used by list.sort() on the open list.
        return self.f_cost < other.f_cost
class AStar:
    """Plain A* search over the module-level ``grid`` using ``delta`` moves."""

    def __init__(self, start, goal):
        # start/goal are (y, x); Node takes (x, y, goal_x, goal_y, g, parent).
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False

    def search(self):
        """Run A*; return the path to the target, or [start.pos] if unreachable."""
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent):
        """Return in-bounds, non-obstacle neighbour nodes of ``parent``."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node):
        """Walk parent links back to the start and return the (y, x) path in order."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    """Bidirectional A*: two AStar searches meeting in the middle."""

    def __init__(self, start, goal):
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self):
        """Expand both frontiers until they meet; return the joined path."""
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)
            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)
            # Each search aims at the other search's current frontier node.
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node))
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node, bwd_node):
        """Join the forward path and the reversed backward path (meet point deduped)."""
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()  # drop the shared meeting node
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    # NOTE(review): the obfuscated source never invoked the search; without it
    # the timing below measures construction only, so run it here.
    bidir_astar.search()
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
| 316 |
'''simple docstring'''
import os
from pathlib import Path
def lowerCamelCase_():
    """JIT-compile and import the MultiScaleDeformableAttention CUDA/CPU kernels.

    Returns the compiled ``MultiScaleDeformableAttention`` extension module.
    Requires a working torch cpp_extension toolchain (and CUDA for the .cu file).
    """
    from torch.utils.cpp_extension import load

    # kernels/deformable_detr lives three directories above this file.
    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
| 316 | 1 |
"""simple docstring"""
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    """Assert that gradients of the two models are (or are not) in sync.

    When ``did_step`` is False the models should have diverging grads; when
    True they must match (within torch.allclose tolerance).
    """
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
def step_model(model, input, target, accelerator, do_backward=True):
    """One MSE training step; backward either manually or through the accelerator."""
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        # Manual gradient accumulation: scale the loss ourselves.
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    """Return everything needed for basic training: a reference model, its
    accelerator-prepared copy, a dataloader, and (optionally) optimizers and
    LR schedulers for both."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(
            ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    """On a single process `no_sync` is a no-op: grads stay in sync every step."""
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    """Under multi-process training, `no_sync` must delay grad sync until exited."""
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    """`accelerator.accumulate` should only sync grads on step boundaries."""
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2)
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    """Same as test_gradient_accumulation, but also checks optimizer/scheduler
    stepping matches between the manual loop and the accumulate wrapper."""
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2)
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                # One scheduler step per process mirrors the prepared scheduler.
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    """GradientState must track which prepared dataloader is active, even nested."""
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)

    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                # Nested iteration switches the active dataloader and back.
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    """Dispatch the sync/accumulation tests appropriate for the current setup."""
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    # Entry point for multi-process spawners (e.g. xla_spawn on TPUs);
    # the process index is ignored.
    main()
# Run the full test suite when launched directly.
if __name__ == "__main__":
    main()
| 719 |
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def mark(key):
    """Decorator that registers a single key on the function's ``handle_key`` list."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator
def mark_multiple(*keys):
    """Decorator that registers several keys on the function's ``handle_key`` list."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator
class KeyHandler(type):
    """Metaclass that collects `mark`-decorated methods into a key -> handler map."""

    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Read one key; dispatch to its registered handler, or return None."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None
def register(cls):
    """Rebuild ``cls`` through the KeyHandler metaclass so its marked methods are wired up."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
| 442 | 0 |
def _lowerCamelCase ( __A : int , __A : int ) -> str:
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
_UpperCAmelCase : Any = str(bin(__A ) )[2:] # remove the leading "0b"
_UpperCAmelCase : Dict = str(bin(__A ) )[2:] # remove the leading "0b"
_UpperCAmelCase : List[Any] = max(len(__A ) , len(__A ) )
return "0b" + "".join(
str(int(char_a == '''1''' and char_b == '''1''' ) )
for char_a, char_b in zip(a_binary.zfill(__A ) , b_binary.zfill(__A ) ) )
# Run the module doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 485 |
from __future__ import annotations
from math import ceil, floor, sqrt
def _lowerCamelCase ( __A : int = 2_000_000 ) -> int:
_UpperCAmelCase : list[int] = [0]
_UpperCAmelCase : int
for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# we want this to be as close as possible to target
_UpperCAmelCase : int = 0
# the area corresponding to the grid that gives the product closest to target
_UpperCAmelCase : int = 0
# an estimate of b, using the quadratic formula
_UpperCAmelCase : float
# the largest integer less than b_estimate
_UpperCAmelCase : int
# the largest integer less than b_estimate
_UpperCAmelCase : int
# the triangle number corresponding to b_floor
_UpperCAmelCase : int
# the triangle number corresponding to b_ceil
_UpperCAmelCase : int
for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
_UpperCAmelCase : str = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
_UpperCAmelCase : Dict = floor(__A )
_UpperCAmelCase : List[Any] = ceil(__A )
_UpperCAmelCase : Union[str, Any] = triangle_numbers[b_floor]
_UpperCAmelCase : List[Any] = triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
_UpperCAmelCase : Union[str, Any] = triangle_b_first_guess * triangle_a
_UpperCAmelCase : Optional[int] = idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
_UpperCAmelCase : Any = triangle_b_second_guess * triangle_a
_UpperCAmelCase : int = idx_a * b_ceil
return area
if __name__ == "__main__":
    # Print the default solution (target = 2_000_000).
    print(f"{_lowerCamelCase() = }")
| 485 | 1 |
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class SCREAMING_SNAKE_CASE(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for BERT (slow and fast tokenizers)."""

    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        """Write a tiny WordPiece vocab so offline tokenizer tests can run."""
        super().setUp()

        vocab_tokens = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            '[PAD]',
            '[MASK]',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize('UNwant\u00E9d,running')
        self.assertListEqual(tokens, ['un', '##want', '##ed', ',', 'runn', '##ing'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = 'UNwant\u00E9d,running'

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = 'UNwant\u00E9d,running'

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz'), ['ah', '\u535A', '\u63A8', 'zz'])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how  \n Are yoU?  '), ['hello', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  '), ['hällo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['h\u00E9llo'])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  '), ['hallo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  '), ['hallo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how  \n Are yoU?  '), ['HeLLo', '!', 'how', 'Are', 'yoU', '?'])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  '), ['HäLLo', '!', 'how', 'Are', 'yoU', '?'])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  '), ['HaLLo', '!', 'how', 'Are', 'yoU', '?'])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=['[UNK]'])
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how  \n Are yoU? [UNK]'), ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'])

    def test_basic_tokenizer_splits_on_punctuation(self):
        tokenizer = BasicTokenizer()
        text = 'a\n\'ll !!to?\'d of, can\'t.'
        expected = ['a', '\'', 'll', '!', '!', 'to', '?', '\'', 'd', 'of', ',', 'can', '\'', 't', '.']
        self.assertListEqual(tokenizer.tokenize(text), expected)

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token='[UNK]')

        self.assertListEqual(tokenizer.tokenize(''), [])
        self.assertListEqual(tokenizer.tokenize('unwanted running'), ['un', '##want', '##ed', 'runn', '##ing'])
        self.assertListEqual(tokenizer.tokenize('unwantedX running'), ['[UNK]', 'runn', '##ing'])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(' '))
        self.assertTrue(_is_whitespace('\t'))
        self.assertTrue(_is_whitespace('\r'))
        self.assertTrue(_is_whitespace('\n'))
        self.assertTrue(_is_whitespace('\u00A0'))

        self.assertFalse(_is_whitespace('A'))
        self.assertFalse(_is_whitespace('-'))

    def test_is_control(self):
        self.assertTrue(_is_control('\u0005'))

        self.assertFalse(_is_control('A'))
        self.assertFalse(_is_control(' '))
        self.assertFalse(_is_control('\t'))
        self.assertFalse(_is_control('\r'))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation('-'))
        self.assertTrue(_is_punctuation('$'))
        self.assertTrue(_is_punctuation('`'))
        self.assertTrue(_is_punctuation('.'))

        self.assertFalse(_is_punctuation('A'))
        self.assertFalse(_is_punctuation(' '))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ['Test', '\xad', 'test']], [['[UNK]'], [], ['[UNK]']])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ['Test', '\xad', 'test']], [['[UNK]'], [], ['[UNK]']])

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained('bert-base-uncased')

        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_a = tokenizer.encode('multi-sequence build', add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_a + [102]

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, 'do_lower_case') else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), 'A'),
                        ((1, 2), ','),
                        ((3, 5), 'na'),
                        ((5, 6), '##ï'),
                        ((6, 8), '##ve'),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), 'Allen'),
                        ((21, 23), '##NL'),
                        ((23, 24), '##P'),
                        ((25, 33), 'sentence'),
                        ((33, 34), '.'),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), 'a'),
                        ((1, 2), ','),
                        ((3, 8), 'naive'),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), 'allen'),
                        ((21, 23), '##nl'),
                        ((23, 24), '##p'),
                        ((25, 33), 'sentence'),
                        ((33, 34), '.'),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens['input_ids']))
                self.assertEqual([e[0] for e in expected_results], tokens['offset_mapping'])

    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ['的', '人', '有']
        text_with_chinese_char = ''.join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                kwargs['tokenize_chinese_chars'] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs['tokenize_chinese_chars'] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"""##{token}""" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
| 700 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE(ProcessorMixin):
    r"""
    CLAP processor: wraps a CLAP feature extractor and a RoBERTa tokenizer into a single
    processor so callers can pass text and/or audio through one object.
    """

    # ProcessorMixin resolves the sub-components from these two class attributes by name,
    # so they must be `feature_extractor_class` / `tokenizer_class` (the original bound
    # both to the same identifier, clobbering the first).
    feature_extractor_class = 'ClapFeatureExtractor'
    tokenizer_class = ('RobertaTokenizer', 'RobertaTokenizerFast')

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or extract features from `audios`.

        Returns the tokenizer encoding (with `input_features` merged in when audio is
        also given), or a `BatchEncoding` of audio features when only audio is given.
        Raises ValueError when neither input is provided.
        """
        sampling_rate = kwargs.pop('sampling_rate', None)

        if text is None and audios is None:
            raise ValueError('You have to specify either text or audios. Both cannot be none.')

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs)

        if text is not None and audios is not None:
            # Merge the audio features into the text encoding so one mapping is returned.
            encoding['input_features'] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Deduplicated union of the two components' input names, order preserved.
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
| 45 | 0 |
'''simple docstring'''
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = '.'

# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model).
# The compliance check below reads this list by the name `INTERNAL_OPS` (the original
# bound it to a throwaway name, leaving `INTERNAL_OPS` undefined at use time).
INTERNAL_OPS = [
    'Assert',
    'AssignVariableOp',
    'EmptyTensorList',
    'MergeV2Checkpoints',
    'ReadVariableOp',
    'ResourceGather',
    'RestoreV2',
    'SaveV2',
    'ShardedFilename',
    'StatefulPartitionedCall',
    'StaticRegexFullMatch',
    'VarHandleOp',
]
def __lowercase(saved_model_path, strict, opset):
    """Check that every op in a TF SavedModel is convertible with the given ONNX opset.

    Loads the supported-op lists for opsets 1..`opset` from `utils/tf_ops/onnx.json`,
    collects every op used by the SavedModel (graph nodes plus library functions), and
    reports the ops that are neither ONNX-supported nor in `INTERNAL_OPS`.

    Raises an Exception listing the incompatible ops when `strict` is true; otherwise
    prints them (or a success message).
    """
    # NOTE(review): the top-of-file import reads `saved_model_pba` — presumably a typo
    # for `saved_model_pb2`; confirm, as SavedModel cannot be imported otherwise.
    saved_model = SavedModel()
    onnx_ops = []

    # Merge the op lists of every opset up to and including the requested one;
    # the script is intended to run from the repo root, hence the "." base path.
    with open(os.path.join('.', 'utils', 'tf_ops', 'onnx.json')) as f:
        onnx_opsets = json.load(f)['opsets']

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, 'rb') as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)
        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to a sorted list for deterministic reporting.
    model_op_names = sorted(model_op_names)
    incompatible_ops = [op for op in model_op_names if op not in onnx_ops and op not in INTERNAL_OPS]

    if strict and len(incompatible_ops) > 0:
        # Join the op names; concatenating the list itself would raise TypeError.
        raise Exception(
            f'Found the following incompatible ops for the opset {opset}:\n' + '\n'.join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f'Found the following incompatible ops for the opset {opset}:')
        print(*incompatible_ops, sep='\n')
    else:
        print(f'The saved model {saved_model_path} can properly be converted with ONNX.')
if __name__ == "__main__":
    # CLI entry point: parse the flags and run the compliance check defined above.
    parser = argparse.ArgumentParser()
    parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
    parser.add_argument(
        "--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
    )
    parser.add_argument(
        "--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
    )
    parser.add_argument(
        "--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
    )
    args = parser.parse_args()

    if args.framework == "onnx":
        # `__lowercase` is the compliance-check function defined in this file.
        __lowercase(args.saved_model_path, args.strict, args.opset)
| 507 |
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
# Detect whether the optional `cookiecutter` dependency is installed; the command's
# `run` method checks this flag (`_has_cookiecutter`) before doing any work.  The
# original bound the flag to a throwaway name and then clobbered it with the logger.
try:
    from cookiecutter.main import cookiecutter

    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def __lowercase(args: Namespace):
    """Factory used by argparse's `set_defaults(func=...)`: build the add-new-model
    command (the `snake_case_` class below) from the parsed CLI arguments."""
    return snake_case_(args.testing, args.testing_file, path=args.path)
class snake_case_ (lowerCamelCase_):
    """CLI command `transformers-cli add-new-model`.

    Scaffolds a new model (configuration, modeling, tokenization, tests, docs) from the
    `templates/adding_a_new_model` cookiecutter template, then splices generated
    snippets into existing source files.

    NOTE(review): the base name `lowerCamelCase_` is not defined in this file —
    presumably `BaseTransformersCLICommand` from the imports above; confirm.
    """

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register the `add-new-model` sub-command and its flags on the root parser."""
        add_new_model_parser = parser.add_parser('add-new-model')
        add_new_model_parser.add_argument('--testing', action='store_true', help='If in testing mode.')
        add_new_model_parser.add_argument('--testing_file', type=str, help='Configuration file on which to run.')
        add_new_model_parser.add_argument(
            '--path', type=str, help='Path to cookiecutter. Should only be used for testing purposes.')
        # Build the command from the parsed args (inlined here because referencing the
        # module-level `__lowercase` factory from inside the class body would be
        # name-mangled by Python).
        add_new_model_parser.set_defaults(
            func=lambda args: snake_case_(args.testing, args.testing_file, path=args.path))

    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path

    def run(self):
        """Execute the scaffolding: run cookiecutter, move the generated files into the
        repo layout, and splice generated snippets into existing files."""
        warnings.warn(
            'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '
            'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '
            'checks, you should use `transformers-cli add-new-model-like` instead.')
        if not _has_cookiecutter:
            raise ImportError(
                'Model creation dependencies are required to use the `add_new_model` command. Install them by running '
                'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n')
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:22]]
        if len(directories) > 0:
            raise ValueError(
                'Several directories starting with `cookiecutter-template-` in current working directory. '
                'Please clean your directory by removing all folders starting with `cookiecutter-template-` or '
                'change your working directory.')

        # Locate the repo root either from this file's location or from the given path.
        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / 'templates' / 'adding_a_new_model'

        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, 'r') as configuration_file:
                testing_configuration = json.load(configuration_file)
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )

        directory = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:22]][0]

        # Retrieve configuration
        with open(directory + '/configuration.json', 'r') as configuration_file:
            configuration = json.load(configuration_file)

        lowercase_model_name = configuration['lowercase_modelname']
        generate_tensorflow_pytorch_and_flax = configuration['generate_tensorflow_pytorch_and_flax']
        os.remove(f'{directory}/configuration.json')

        output_pytorch = 'PyTorch' in generate_tensorflow_pytorch_and_flax
        output_tensorflow = 'TensorFlow' in generate_tensorflow_pytorch_and_flax
        output_flax = 'Flax' in generate_tensorflow_pytorch_and_flax

        model_dir = f'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}', exist_ok=True)

        # Tests require submodules as they have parent imports
        with open(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py', 'w'):
            pass

        shutil.move(
            f'{directory}/__init__.py', f'{model_dir}/__init__.py',)
        shutil.move(
            f'{directory}/configuration_{lowercase_model_name}.py',
            f'{model_dir}/configuration_{lowercase_model_name}.py',)

        def remove_copy_lines(path):
            # Strip "# Copied from transformers." markers from a generated file in place.
            with open(path, 'r') as f:
                lines = f.readlines()
            with open(path, 'w') as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)

        if output_pytorch:
            if not self._testing:
                remove_copy_lines(f'{directory}/modeling_{lowercase_model_name}.py')
            shutil.move(
                f'{directory}/modeling_{lowercase_model_name}.py',
                f'{model_dir}/modeling_{lowercase_model_name}.py',)
            shutil.move(
                f'{directory}/test_modeling_{lowercase_model_name}.py',
                f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py',)
        else:
            os.remove(f'{directory}/modeling_{lowercase_model_name}.py')
            os.remove(f'{directory}/test_modeling_{lowercase_model_name}.py')

        if output_tensorflow:
            if not self._testing:
                remove_copy_lines(f'{directory}/modeling_tf_{lowercase_model_name}.py')
            shutil.move(
                f'{directory}/modeling_tf_{lowercase_model_name}.py',
                f'{model_dir}/modeling_tf_{lowercase_model_name}.py',)
            shutil.move(
                f'{directory}/test_modeling_tf_{lowercase_model_name}.py',
                f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py',)
        else:
            os.remove(f'{directory}/modeling_tf_{lowercase_model_name}.py')
            os.remove(f'{directory}/test_modeling_tf_{lowercase_model_name}.py')

        if output_flax:
            if not self._testing:
                remove_copy_lines(f'{directory}/modeling_flax_{lowercase_model_name}.py')
            shutil.move(
                f'{directory}/modeling_flax_{lowercase_model_name}.py',
                f'{model_dir}/modeling_flax_{lowercase_model_name}.py',)
            shutil.move(
                f'{directory}/test_modeling_flax_{lowercase_model_name}.py',
                f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py',)
        else:
            os.remove(f'{directory}/modeling_flax_{lowercase_model_name}.py')
            os.remove(f'{directory}/test_modeling_flax_{lowercase_model_name}.py')

        shutil.move(
            f'{directory}/{lowercase_model_name}.md',
            f'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md',)
        shutil.move(
            f'{directory}/tokenization_{lowercase_model_name}.py',
            f'{model_dir}/tokenization_{lowercase_model_name}.py',)
        shutil.move(
            f'{directory}/tokenization_fast_{lowercase_model_name}.py',
            f'{model_dir}/tokenization_{lowercase_model_name}_fast.py',)

        from os import fdopen, remove
        from shutil import copymode, move
        from tempfile import mkstemp

        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
            # Insert `lines_to_copy` right below the marker line inside `original_file`,
            # preserving the file's permissions.
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, 'w') as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)

            if not line_found:
                raise ValueError(f'Line {line_to_copy_below} was not found in file.')

            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)

        def skip_units(line):
            # A snippet is skipped when it targets a framework the user did not request.
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )

        def replace_in_files(path_to_datafile):
            # Parse the generated "to_replace" file: each snippet names a target file,
            # a marker line, and the lines to splice below that marker.
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"')[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"')[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)
                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)

            remove(path_to_datafile)

        replace_in_files(f'{directory}/to_replace_{lowercase_model_name}.py')
        os.rmdir(directory)
| 335 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both bindings below use the same name `A__`, so the logger created on
# the first line is immediately overwritten by the archive map.  Presumably these were
# meant to be `logger` and `LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP`; the map name may be
# imported elsewhere, so confirm before renaming.
A__ = logging.get_logger(__name__)

# Map from pretrained checkpoint id to its hosted config URL.
A__ = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class __UpperCamelCase(PretrainedConfig):
    r"""Configuration for the LXMERT model.

    Stores the text-encoder hyper-parameters, the sizes of the three label spaces
    (QA answers, object classes, attribute classes), the depths of the language /
    cross-modality / vision stacks, the visual-feature dimensions, and the flags
    selecting which pre-training tasks and visual losses are active.
    """

    # PretrainedConfig resolves these two class attributes by name (the original bound
    # both to the same identifier, clobbering the first).
    model_type = 'lxmert'
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        # LXMERT has three stacks, so the layer count is reported per modality.
        self.num_hidden_layers = {'vision': r_layers, 'cross_encoder': x_layers, 'language': l_layers}
        super().__init__(**kwargs)
| 719 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both bindings below use the same name `A__`, so the logger created on
# the first line is immediately overwritten by the archive map.  Presumably these were
# meant to be `logger` and `TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP`;
# the map name may be imported elsewhere, so confirm before renaming.
A__ = logging.get_logger(__name__)

# Map from pretrained checkpoint id to its hosted config URL.
A__ = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class __UpperCamelCase(PretrainedConfig):
    r"""Configuration for the Time Series Transformer model.

    Holds the time-series-specific settings (prediction/context lengths, distribution
    head, lags, static/dynamic feature counts) and the encoder-decoder Transformer
    hyper-parameters.  `feature_size` is derived from the input size, the lags and the
    number of extra features the model concatenates to each time step.
    """

    # PretrainedConfig resolves these two class attributes by name (the original bound
    # both to the same identifier, clobbering the first).
    model_type = 'time_series_transformer'
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
        'num_hidden_layers': 'encoder_layers',
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = 'student_t',
        loss: str = 'nll',
        input_size: int = 1,
        # NOTE: a shared (mutable) default list — it is never mutated here, only
        # assigned, so sharing is harmless; kept for signature compatibility.
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = 'mean',
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = 'gelu',
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    'The cardinality should be a list of the same length as `num_static_categorical_features`')
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    'The embedding dimension should be a list of the same length as `num_static_categorical_features`')
            self.embedding_dimension = embedding_dimension
        else:
            # Standard heuristic: half the category count (plus one), capped at 50.
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        # Extra per-step features concatenated to the lagged values.
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 184 | 0 |
def a_(n_term: str) -> list:
    """Generate the first *n* terms of the harmonic series as fraction strings.

    `n_term` is the user-entered count as a string; an empty string (and any
    non-positive count) yields an empty list.  The first term is "1", the k-th
    term "1/k".  (The original annotated the return as `int`, but a list of
    strings is returned.)
    """
    if n_term == "":
        return []
    count = int(n_term)
    return ['1' if i == 1 else f'1/{i}' for i in range(1, count + 1)]
if __name__ == "__main__":
    # Prompt for the term count and print the series; `a_` is this file's generator
    # (the original called an undefined name `harmonic_series`).
    nth_term = input("""Enter the last number (nth term) of the Harmonic Series""")
    print("""Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n""")
    print(a_(nth_term))
| 328 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
# Make all RNG-dependent ops deterministic so the pixel-level checks below are reproducible.
enable_full_determinism()
# Fast (CPU, tiny-model) tests for the Kandinsky inpaint pipeline.
class _lowerCamelCase ( lowercase__ , unittest.TestCase ):
    '''simple docstring'''
    # NOTE(review): the five class attributes below are all bound to the same name `A_`,
    # so only the last binding survives; they look like the usual PipelineTesterMixin
    # fields (pipeline_class, params, batch_params, required_optional_params,
    # test_xformers_attention) — confirm before renaming.
    A_ : Optional[Any] = KandinskyInpaintPipeline
    A_ : Tuple = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
    A_ : Optional[Any] = [
        """prompt""",
        """negative_prompt""",
        """image_embeds""",
        """negative_image_embeds""",
        """image""",
        """mask_image""",
    ]
    A_ : int = [
        """generator""",
        """height""",
        """width""",
        """latents""",
        """guidance_scale""",
        """negative_prompt""",
        """num_inference_steps""",
        """return_dict""",
        """guidance_scale""",
        """num_images_per_prompt""",
        """output_type""",
        """return_dict""",
    ]
    A_ : str = False
    # NOTE(review): every method below is defined under the same identifier
    # `__lowerCAmelCase`, so later definitions shadow earlier ones and unittest cannot
    # discover the tests; several bodies also assign to `__magic_name__` but return
    # different names.  This looks like a mechanically renamed copy of the Kandinsky
    # inpaint fast tests — code left byte-identical, comments only.
    @property
    def __lowerCAmelCase ( self : List[str] ) -> Dict:
        # presumably text_embedder_hidden_size — TODO confirm
        return 32
    @property
    def __lowerCAmelCase ( self : List[Any] ) -> Any:
        # presumably time_input_dim — TODO confirm
        return 32
    @property
    def __lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
        return self.time_input_dim
    @property
    def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
        return self.time_input_dim * 4
    @property
    def __lowerCAmelCase ( self : int ) -> Any:
        # presumably cross_attention_dim — TODO confirm
        return 100
    @property
    def __lowerCAmelCase ( self : List[Any] ) -> List[str]:
        # Tiny multilingual tokenizer used by the dummy text encoder.
        __magic_name__ : Any = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
        return tokenizer
    @property
    def __lowerCAmelCase ( self : str ) -> List[Any]:
        # Deterministic tiny MCLIP text encoder (seeded, eval mode).
        torch.manual_seed(0 )
        __magic_name__ : Union[str, Any] = MCLIPConfig(
            numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
        __magic_name__ : int = MultilingualCLIP(_A )
        __magic_name__ : Optional[Any] = text_encoder.eval()
        return text_encoder
    @property
    def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
        # Deterministic tiny UNet configured for inpainting (image + mask channels).
        torch.manual_seed(0 )
        __magic_name__ : Tuple = {
            'in_channels': 9,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'text_image',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'text_image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }
        __magic_name__ : Optional[Any] = UNetaDConditionModel(**_A )
        return model
    @property
    def __lowerCAmelCase ( self : Tuple ) -> Optional[int]:
        # Keyword arguments for the tiny VQ image decoder below.
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }
    @property
    def __lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
        # Deterministic tiny VQ model.
        torch.manual_seed(0 )
        __magic_name__ : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
        return model
    def __lowerCAmelCase ( self : Dict ) -> Tuple:
        # Assemble the full set of pipeline components from the dummy parts above.
        __magic_name__ : List[str] = self.dummy_text_encoder
        __magic_name__ : List[str] = self.dummy_tokenizer
        __magic_name__ : Tuple = self.dummy_unet
        __magic_name__ : Any = self.dummy_movq
        __magic_name__ : List[Any] = DDIMScheduler(
            num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=_A , set_alpha_to_one=_A , steps_offset=1 , prediction_type='epsilon' , thresholding=_A , )
        __magic_name__ : str = {
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components
    def __lowerCAmelCase ( self : Dict , _A : Optional[int] , _A : List[Any]=0 ) -> int:
        # Build deterministic call kwargs: embeddings, a 64x64 init image and a mask
        # with a single zeroed pixel, plus a seeded generator.
        __magic_name__ : Optional[Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_A ) ).to(_A )
        __magic_name__ : Optional[int] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_A )
        # create init_image
        __magic_name__ : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(_A ) ).to(_A )
        __magic_name__ : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __magic_name__ : Optional[int] = Image.fromarray(np.uinta(_A ) ).convert('RGB' ).resize((256, 256) )
        # create mask
        __magic_name__ : int = np.ones((64, 64) , dtype=np.floataa )
        __magic_name__ : Optional[int] = 0
        if str(_A ).startswith('mps' ):
            __magic_name__ : Any = torch.manual_seed(_A )
        else:
            __magic_name__ : Optional[int] = torch.Generator(device=_A ).manual_seed(_A )
        __magic_name__ : Optional[int] = {
            'prompt': 'horse',
            'image': init_image,
            'mask_image': mask,
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'generator': generator,
            'height': 64,
            'width': 64,
            'num_inference_steps': 2,
            'guidance_scale': 4.0,
            'output_type': 'np',
        }
        return inputs
    def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
        # End-to-end smoke test on CPU: output shape and a fixed pixel slice.
        __magic_name__ : Dict = 'cpu'
        __magic_name__ : str = self.get_dummy_components()
        __magic_name__ : Dict = self.pipeline_class(**_A )
        __magic_name__ : Any = pipe.to(_A )
        pipe.set_progress_bar_config(disable=_A )
        __magic_name__ : List[Any] = pipe(**self.get_dummy_inputs(_A ) )
        __magic_name__ : Tuple = output.images
        __magic_name__ : Any = pipe(
            **self.get_dummy_inputs(_A ) , return_dict=_A , )[0]
        __magic_name__ : str = image[0, -3:, -3:, -1]
        __magic_name__ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
        print(F'image.shape {image.shape}' )
        assert image.shape == (1, 64, 64, 3)
        __magic_name__ : Optional[Any] = np.array(
            [0.832_6919, 0.7379_0467, 0.2091_8581, 0.930_9612, 0.551_1791, 0.4371_3328, 0.551_3321, 0.4992_2934, 0.5949_7786] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        ), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
        ), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
    def __lowerCAmelCase ( self : str ) -> Optional[int]:
        # Relax the mixin's batch-consistency tolerance for this pipeline.
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
# GPU integration test against the real Kandinsky 2.1 checkpoints (marked slow).
# NOTE(review): both methods below share the identifier `__lowerCAmelCase` (the first
# looks like tearDown, the second the actual test), and bodies assign to
# `__magic_name__` while reading other names — left byte-identical, comments only.
@slow
@require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
    '''simple docstring'''
    def __lowerCAmelCase ( self : Any ) -> List[str]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def __lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
        # Reference output, source image, and an all-ones mask with one pixel cleared.
        __magic_name__ : int = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy' )
        __magic_name__ : str = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
        __magic_name__ : Optional[Any] = np.ones((768, 768) , dtype=np.floataa )
        __magic_name__ : Tuple = 0
        __magic_name__ : Optional[int] = 'a hat'
        # Prior pipeline produces the image embeddings consumed by the inpaint pipeline.
        __magic_name__ : Any = KandinskyPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.floataa )
        pipe_prior.to(_A )
        __magic_name__ : Optional[int] = KandinskyInpaintPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1-inpaint' , torch_dtype=torch.floataa )
        __magic_name__ : List[Any] = pipeline.to(_A )
        pipeline.set_progress_bar_config(disable=_A )
        __magic_name__ : Tuple = torch.Generator(device='cpu' ).manual_seed(0 )
        __magic_name__ , __magic_name__ : List[Any] = pipe_prior(
            _A , generator=_A , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
        __magic_name__ : Optional[int] = pipeline(
            _A , image=_A , mask_image=_A , image_embeds=_A , negative_image_embeds=_A , generator=_A , num_inference_steps=100 , height=768 , width=768 , output_type='np' , )
        __magic_name__ : Optional[int] = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(_A , _A )
from __future__ import annotations


def make_matrix(row_size: int = 4) -> list[list[int]]:
    """Build a `row_size` x `row_size` matrix filled with 1..n*n in row-major order.

    Negative sizes are treated as their absolute value; zero falls back to 4.
    """
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix 90 degrees counterclockwise (returns a new matrix)."""
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix 180 degrees (returns a new matrix)."""
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix 270 degrees counterclockwise (returns a new matrix)."""
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    """Swap rows and columns."""
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    """Reverse the order of the rows (flip vertically)."""
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    """Reverse each row (flip horizontally)."""
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    """Print the matrix one row per line."""
    for row in matrix:
        print(*row)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
| 709 |
from typing import Dict
from .base import GenericTensor, Pipeline
class _lowerCamelCase(Pipeline):
    """Feature-extraction pipeline: returns the model's raw hidden states for the input.

    The Pipeline base class dispatches to `_sanitize_parameters`, `preprocess`,
    `_forward` and `postprocess` by name, so those method names are part of the
    contract (the original defined all four under one shared name, leaving only
    the last alive).
    """

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        """Split user kwargs into (preprocess, forward, postprocess) parameter dicts."""
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            # `truncation` may be given either directly or inside tokenize_kwargs — not both.
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)')
            tokenize_kwargs['truncation'] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params['return_tensors'] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        """Tokenize the raw input into framework tensors."""
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        """Run the model on the tokenized inputs."""
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        """Return the first model output, as a tensor or as nested Python lists."""
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        """Extract features for one input or a batch of inputs."""
        return super().__call__(*args, **kwargs)
| 152 | 0 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
SCREAMING_SNAKE_CASE = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
SCREAMING_SNAKE_CASE = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = 
datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'
SCREAMING_SNAKE_CASE = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class UpperCAmelCase_(datasets.Metric):
    """Exact-match metric.

    Returns the percentage of predictions that equal their reference exactly,
    after optional regex stripping, case folding, punctuation removal and digit
    removal.

    NOTE(review): the obfuscated source named both methods ``A__``, so the
    second definition clobbered the first and the ``datasets.Metric`` hooks
    (``_info`` / ``_compute``) were never overridden; the hook names are
    restored here. It also discarded every intermediate result into a single
    local, so the final comparison used the raw, unprocessed inputs.
    """

    def _info(self):
        """Declare the metric's input schema for `datasets`."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        """Return ``{"exact_match": rate}`` with ``rate`` in [0.0, 100.0].

        Args:
            predictions: list of predicted texts.
            references: list of reference texts (same length as predictions).
            regexes_to_ignore: optional list of regex patterns stripped from
                both sides before any other normalization.
            ignore_case: lowercase both sides before comparing.
            ignore_punctuation: drop all ``string.punctuation`` characters.
            ignore_numbers: drop all ``string.digits`` characters.
        """
        if regexes_to_ignore is not None:
            # Strip every ignored pattern from both sides first; each pass
            # rebinds the arrays so the substitutions accumulate.
            for pattern in regexes_to_ignore:
                predictions = np.array([re.sub(pattern, "", x) for x in predictions])
                references = np.array([re.sub(pattern, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            # str.maketrans with three args maps every listed char to deletion.
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        # Element-wise equality, then mean -> fraction in [0, 1] -> percentage.
        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
| 94 |
'''simple docstring'''
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def lowercase_(__A: int) -> int:
    """Return *__A* plus two.

    Used below as the ``add_two`` tool for the python-interpreter tests. The
    original body returned ``x + 2`` while the parameter was named ``__A``,
    which raised ``NameError``; the parameter is now actually used.
    """
    return __A + 2
class UpperCAmelCase_(unittest.TestCase):
    """Tests for ``transformers.tools.python_interpreter.evaluate``.

    The obfuscated source named every test method ``A__`` — so later
    definitions clobbered earlier ones and unittest could discover at most one
    of them — and referenced an undefined ``add_two`` helper (the module-level
    helper is named ``lowercase_``). Unique ``test_*`` names are restored and
    ``lowercase_`` is registered under the ``'add_two'`` tool key, which the
    interpreter's error message checks by name.
    """

    def test_evaluate_assign(self):
        """Assignments update `state` and evaluate to the assigned value."""
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        """Tool calls resolve through the tools dict; missing tools print an error."""
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": lowercase_}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        """A constant assignment evaluates to the constant."""
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        """Dict literals are built with evaluated values, including tool calls."""
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": lowercase_}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        """Multi-statement programs evaluate to the last assignment."""
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        """f-strings interpolate values from `state`."""
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        """Both branches of an if/else are reachable depending on `state`."""
        code = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        """List literals are built with evaluated elements."""
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": lowercase_}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        """A bare name lookup resolves through `state`."""
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        """Indexing into lists and dicts works on interpreter-built values."""
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": lowercase_}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": lowercase_}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        """for-loops iterate and leave the loop variable in `state`."""
        code = "x = 0\nfor i in range(3):\n    x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
| 94 | 1 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase_(DiffusionPipeline):
    """Minimal unconditional image-generation pipeline.

    Starts from Gaussian noise and iteratively denoises with a UNet under a
    scheduler's timestep loop, then converts the result to numpy/PIL images.

    NOTE(review): the original base class was the undefined obfuscated name
    ``lowerCamelCase_``; ``DiffusionPipeline`` — the only pipeline base this
    module imports — is used instead. The duplicated parameter names (a
    SyntaxError) and the stray ``"This is a local test"`` values appended to
    both returns (which broke the declared return contract) are removed.
    """

    def __init__(self, unet, scheduler):
        """Register the denoising ``unet`` and the noise ``scheduler``."""
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """Sample ``batch_size`` images.

        Args:
            batch_size: number of images to generate.
            generator: optional RNG for reproducible noise.
            num_inference_steps: number of scheduler timesteps.
            output_type: ``"pil"`` for PIL images, anything else for numpy.
            return_dict: return an ``ImagePipelineOutput`` instead of a tuple.

        Returns:
            ``ImagePipelineOutput`` with the images, or ``(images,)`` when
            ``return_dict`` is False.
        """
        # Start from pure Gaussian noise in the model's sample space.
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample

        # Map from [-1, 1] to [0, 1] and move channels last for numpy/PIL.
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 705 |
"""Lazy import scaffolding for the RoBERTa-PreLayerNorm model family.

Configuration symbols are declared unconditionally; the PyTorch, TensorFlow
and Flax symbol lists are only declared when the corresponding framework is
installed, and the actual modules are loaded on first attribute access via
``_LazyModule``.
"""
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)
# NOTE(review): each framework branch below rebinds ``lowerCAmelCase_`` to a
# plain list instead of adding a key to the import-structure dict, and the
# final ``_LazyModule`` call references ``_import_structure``, which is never
# defined here — this obfuscated init cannot work as written; compare with the
# upstream ``transformers`` package ``__init__`` layout before relying on it.
# Names importable without any deep-learning framework installed.
lowerCAmelCase_ = {
    'configuration_roberta_prelayernorm': [
        'ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'RobertaPreLayerNormConfig',
        'RobertaPreLayerNormOnnxConfig',
    ],
}
# PyTorch models: declared only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase_ = [
        'ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST',
        'RobertaPreLayerNormForCausalLM',
        'RobertaPreLayerNormForMaskedLM',
        'RobertaPreLayerNormForMultipleChoice',
        'RobertaPreLayerNormForQuestionAnswering',
        'RobertaPreLayerNormForSequenceClassification',
        'RobertaPreLayerNormForTokenClassification',
        'RobertaPreLayerNormModel',
        'RobertaPreLayerNormPreTrainedModel',
    ]
# TensorFlow models: declared only when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase_ = [
        'TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFRobertaPreLayerNormForCausalLM',
        'TFRobertaPreLayerNormForMaskedLM',
        'TFRobertaPreLayerNormForMultipleChoice',
        'TFRobertaPreLayerNormForQuestionAnswering',
        'TFRobertaPreLayerNormForSequenceClassification',
        'TFRobertaPreLayerNormForTokenClassification',
        'TFRobertaPreLayerNormMainLayer',
        'TFRobertaPreLayerNormModel',
        'TFRobertaPreLayerNormPreTrainedModel',
    ]
# Flax models: declared only when Flax is installed.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase_ = [
        'FlaxRobertaPreLayerNormForCausalLM',
        'FlaxRobertaPreLayerNormForMaskedLM',
        'FlaxRobertaPreLayerNormForMultipleChoice',
        'FlaxRobertaPreLayerNormForQuestionAnswering',
        'FlaxRobertaPreLayerNormForSequenceClassification',
        'FlaxRobertaPreLayerNormForTokenClassification',
        'FlaxRobertaPreLayerNormModel',
        'FlaxRobertaPreLayerNormPreTrainedModel',
    ]
# Static type checkers see the real imports so IDE navigation keeps working.
if TYPE_CHECKING:
    from .configuration_roberta_prelayernorm import (
        ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
        RobertaPreLayerNormConfig,
        RobertaPreLayerNormOnnxConfig,
    )
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta_prelayernorm import (
            ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaPreLayerNormForCausalLM,
            RobertaPreLayerNormForMaskedLM,
            RobertaPreLayerNormForMultipleChoice,
            RobertaPreLayerNormForQuestionAnswering,
            RobertaPreLayerNormForSequenceClassification,
            RobertaPreLayerNormForTokenClassification,
            RobertaPreLayerNormModel,
            RobertaPreLayerNormPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta_prelayernorm import (
            TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaPreLayerNormForCausalLM,
            TFRobertaPreLayerNormForMaskedLM,
            TFRobertaPreLayerNormForMultipleChoice,
            TFRobertaPreLayerNormForQuestionAnswering,
            TFRobertaPreLayerNormForSequenceClassification,
            TFRobertaPreLayerNormForTokenClassification,
            TFRobertaPreLayerNormMainLayer,
            TFRobertaPreLayerNormModel,
            TFRobertaPreLayerNormPreTrainedModel,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta_prelayernorm import (
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormPreTrainedModel,
        )
# At runtime, replace this module with a lazy proxy.
else:
    import sys

    lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 435 | 0 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
a : Optional[Any] = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model')
@require_sentencepiece
@require_tokenizers
class lowercase(_lowercase , unittest.TestCase ):
    """SpeechT5 tokenizer test suite (TokenizerTesterMixin-style).

    NOTE(review): this block carries systematic obfuscation damage and is left
    byte-identical here. Every method is named ``lowercase__`` (later defs
    clobber earlier ones, so unittest discovers no individual ``test_*``
    methods); several defs repeat the parameter name ``__SCREAMING_SNAKE_CASE``
    (a SyntaxError); and most assignments collapse distinct locals into
    ``a__`` while later lines read the intended names (``tokenizer``,
    ``mask_token``, ``vocab_keys``, ``text``, ``ids``, ...), which would raise
    NameError. The base class ``_lowercase`` is also undefined in this module
    (presumably the imported ``TokenizerTesterMixin`` — confirm). Restore the
    upstream SpeechT5 tokenizer test before running.
    """
    # Obfuscated class attributes; upstream these are tokenizer_class /
    # test_rust_tokenizer / test_sentencepiece — confirm before relying on them.
    __snake_case: Union[str, Any] = SpeechTaTokenizer
    __snake_case: Any = False
    __snake_case: List[Any] = True
    def lowercase__ ( self ) -> Tuple:
        """setUp: build a tokenizer from the SentencePiece fixture, add <mask>
        and <ctc_blank>, and save it to the temp dir (broken as written — see
        class note)."""
        super().setUp()

        # We have a SentencePiece fixture for testing
        a__ = SpeechTaTokenizer(__SCREAMING_SNAKE_CASE )

        a__ = AddedToken('<mask>' , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE )
        a__ = mask_token
        tokenizer.add_special_tokens({'mask_token': mask_token} )
        tokenizer.add_tokens(['<ctc_blank>'] )

        tokenizer.save_pretrained(self.tmpdirname )
    def lowercase__ ( self , __SCREAMING_SNAKE_CASE ) -> int:
        """Return an (input_text, output_text) pair for round-trip tests."""
        a__ = 'this is a test'
        a__ = 'this is a test'
        return input_text, output_text
    def lowercase__ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=2_0 , __SCREAMING_SNAKE_CASE=5 ) -> Optional[Any]:
        """Encode/decode a clean sequence (duplicate parameter names and
        collapsed locals make this a SyntaxError/NameError as written)."""
        a__ , a__ = self.get_input_output_texts(__SCREAMING_SNAKE_CASE )
        a__ = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
        a__ = tokenizer.decode(__SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE )
        return text, ids
    def lowercase__ ( self ) -> Optional[int]:
        """<pad> should map to id 1 and back."""
        a__ = '<pad>'
        a__ = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
    def lowercase__ ( self ) -> int:
        """Spot-check vocab ordering and total size (81 including added tokens)."""
        a__ = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0] , '<s>' )
        self.assertEqual(vocab_keys[1] , '<pad>' )
        self.assertEqual(vocab_keys[-4] , 'œ' )
        self.assertEqual(vocab_keys[-2] , '<mask>' )
        self.assertEqual(vocab_keys[-1] , '<ctc_blank>' )
        self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 8_1 )
    def lowercase__ ( self ) -> Optional[Any]:
        """Base vocab size (without added tokens) is 79."""
        self.assertEqual(self.get_tokenizer().vocab_size , 7_9 )
    def lowercase__ ( self ) -> Dict:
        """Adding regular and special tokens grows the vocab and the new ids
        appear when encoding text that contains them."""
        a__ = self.get_tokenizers(do_lower_case=__SCREAMING_SNAKE_CASE )
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}' ):
                a__ = tokenizer.vocab_size
                a__ = len(__SCREAMING_SNAKE_CASE )

                self.assertNotEqual(__SCREAMING_SNAKE_CASE , 0 )

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                a__ = ['aaaaa bbbbbb', 'cccccccccdddddddd']
                a__ = tokenizer.add_tokens(__SCREAMING_SNAKE_CASE )
                a__ = tokenizer.vocab_size
                a__ = len(__SCREAMING_SNAKE_CASE )

                self.assertNotEqual(__SCREAMING_SNAKE_CASE , 0 )
                self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
                self.assertEqual(__SCREAMING_SNAKE_CASE , len(__SCREAMING_SNAKE_CASE ) )
                self.assertEqual(__SCREAMING_SNAKE_CASE , all_size + len(__SCREAMING_SNAKE_CASE ) )

                a__ = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , add_special_tokens=__SCREAMING_SNAKE_CASE )

                self.assertGreaterEqual(len(__SCREAMING_SNAKE_CASE ) , 4 )
                self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )

                a__ = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
                a__ = tokenizer.add_special_tokens(__SCREAMING_SNAKE_CASE )
                a__ = tokenizer.vocab_size
                a__ = len(__SCREAMING_SNAKE_CASE )

                self.assertNotEqual(__SCREAMING_SNAKE_CASE , 0 )
                self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
                self.assertEqual(__SCREAMING_SNAKE_CASE , len(__SCREAMING_SNAKE_CASE ) )
                self.assertEqual(__SCREAMING_SNAKE_CASE , all_size_a + len(__SCREAMING_SNAKE_CASE ) )

                a__ = tokenizer.encode(
                    '>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=__SCREAMING_SNAKE_CASE )

                self.assertGreaterEqual(len(__SCREAMING_SNAKE_CASE ) , 6 )
                self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[0] , tokens[1] )
                self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3] , tokens[-4] )
                self.assertEqual(tokens[0] , tokenizer.eos_token_id )
                self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
    def lowercase__ ( self ) -> Optional[Any]:
        """Intentionally skipped mixin hook."""
        pass
    def lowercase__ ( self ) -> List[Any]:
        """Intentionally skipped mixin hook."""
        pass
    def lowercase__ ( self ) -> Dict:
        """Full tokenizer check: tokenize, convert to ids, and back; unknown
        pieces (e.g. '92000') round-trip as '<unk>'."""
        a__ = self.get_tokenizer()

        a__ = tokenizer.tokenize('This is a test' )
        # fmt: off
        self.assertListEqual(__SCREAMING_SNAKE_CASE , [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'] )
        # fmt: on

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , [4, 3_2, 1_1, 1_0, 1_2, 4, 1_0, 1_2, 4, 7, 4, 6, 5, 1_2, 6] , )

        a__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            __SCREAMING_SNAKE_CASE , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] )
        a__ = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
        # fmt: off
        self.assertListEqual(__SCREAMING_SNAKE_CASE , [4, 3_0, 4, 2_0, 7, 1_2, 4, 2_5, 8, 1_3, 9, 4, 1_0, 9, 4, 3, 2_3, 4, 7, 9, 1_4, 4, 6, 1_1, 1_0, 1_2, 4, 1_0, 1_2, 4, 1_9, 7, 1_5, 1_2, 7_3, 2_6] )
        # fmt: on

        a__ = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
        self.assertListEqual(
            __SCREAMING_SNAKE_CASE , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] )
    @slow
    def lowercase__ ( self ) -> Dict:
        """Integration test against the hosted microsoft/speecht5_asr tokenizer
        with a pinned revision and pre-computed expected encodings."""
        # Use custom sequence because this tokenizer does not handle numbers.
        a__ = [
            'Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '
            'general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '
            'Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '
            'models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.',
            'BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '
            'conditioning on both left and right context in all layers.',
            'The quick brown fox jumps over the lazy dog.',
        ]

        # fmt: off
        a__ = {
            'input_ids': [
                [4, 3_2, 1_3, 7, 9, 1_2, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_2, 4, 6_4, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_5, 2_2, 4, 2_8, 9, 8, 2_0, 9, 4, 7, 1_2, 4, 2_4, 2_2, 6, 8, 1_3, 1_7, 1_1, 3_9, 6, 1_3, 7, 9, 1_2, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_2, 4, 7, 9, 1_4, 4, 2_4, 2_2, 6, 8, 1_3, 1_7, 1_1, 3_9, 2_4, 1_3, 5, 6, 1_3, 7, 1_0, 9, 5, 1_4, 3_9, 2_5, 5, 1_3, 6, 6_3, 4, 2_4, 1_3, 8, 2_7, 1_0, 1_4, 5, 1_2, 4, 2_1, 5, 9, 5, 1_3, 7, 1_5, 3_9, 2_4, 1_6, 1_3, 2_4, 8, 1_2, 5, 4, 7, 1_3, 1_7, 1_1, 1_0, 6, 5, 1_7, 6, 1_6, 1_3, 5, 1_2, 4, 6_4, 4_0, 4_7, 5_4, 3_2, 2_3, 4, 5_3, 4_9, 3_2, 2_3, 4, 5_4, 8, 4_0, 4_7, 5_4, 3_2, 7, 2_3, 4, 6_9, 5_2, 4_3, 2_3, 4, 5_1, 1_0, 1_2, 6, 1_0, 1_5, 4_0, 5, 1_3, 6, 2_3, 4, 6_9, 5_2, 4_8, 5, 6, 2_6, 2_6, 2_6, 6_3, 4, 1_9, 8, 1_3, 4, 4_8, 7, 6, 1_6, 1_3, 7, 1_5, 4, 5_2, 7, 9, 2_1, 1_6, 7, 2_1, 5, 4, 6_1, 9, 1_4, 5, 1_3, 1_2, 6, 7, 9, 1_4, 1_0, 9, 2_1, 4, 6_4, 4_8, 5_2, 6_1, 6_3, 4, 7, 9, 1_4, 4, 4_8, 7, 6, 1_6, 1_3, 7, 1_5, 4, 5_2, 7, 9, 2_1, 1_6, 7, 2_1, 5, 4, 5_3, 5, 9, 5, 1_3, 7, 6, 1_0, 8, 9, 4, 6_4, 4_8, 5_2, 5_3, 6_3, 4, 2_0, 1_0, 6, 1_1, 4, 8, 2_7, 5, 1_3, 4, 6, 1_1, 1_0, 1_3, 6, 2_2, 3_9, 6, 2_0, 8, 4, 2_4, 1_3, 5, 6, 1_3, 7, 1_0, 9, 5, 1_4, 4, 1_8, 8, 1_4, 5, 1_5, 1_2, 4, 1_0, 9, 4, 8, 9, 5, 4, 1_1, 1_6, 9, 1_4, 1_3, 5, 1_4, 4, 2_4, 1_5, 1_6, 1_2, 4, 1_5, 7, 9, 2_1, 1_6, 7, 2_1, 5, 1_2, 4, 7, 9, 1_4, 4, 1_4, 5, 5, 2_4, 4, 1_0, 9, 6, 5, 1_3, 8, 2_4, 5, 1_3, 7, 2_5, 1_0, 1_5, 1_0, 6, 2_2, 4, 2_5, 5, 6, 2_0, 5, 5, 9, 4, 5_8, 7, 3_7, 2_3, 4, 4_9, 2_2, 3_2, 8, 1_3, 1_7, 1_1, 4, 7, 9, 1_4, 4, 3_2, 5, 9, 1_2, 8, 1_3, 5_5, 1_5, 8, 2_0, 2_6, 2],
                [4, 4_0, 4_7, 5_4, 3_2, 4, 1_0, 1_2, 4, 1_4, 5, 1_2, 1_0, 2_1, 9, 5, 1_4, 4, 6, 8, 4, 2_4, 1_3, 5, 3_9, 6, 1_3, 7, 1_0, 9, 4, 1_4, 5, 5, 2_4, 4, 2_5, 1_0, 1_4, 1_0, 1_3, 5, 1_7, 6, 1_0, 8, 9, 7, 1_5, 4, 1_3, 5, 2_4, 1_3, 5, 1_2, 5, 9, 6, 7, 6, 1_0, 8, 9, 1_2, 4, 1_9, 1_3, 8, 1_8, 4, 1_6, 9, 1_5, 7, 2_5, 5, 1_5, 5, 1_4, 4, 6, 5, 3_7, 6, 4, 2_5, 2_2, 4, 4_6, 8, 1_0, 9, 6, 1_5, 2_2, 4, 1_7, 8, 9, 1_4, 1_0, 6, 1_0, 8, 9, 1_0, 9, 2_1, 4, 8, 9, 4, 2_5, 8, 6, 1_1, 4, 1_5, 5, 1_9, 6, 4, 7, 9, 1_4, 4, 1_3, 1_0, 2_1, 1_1, 6, 4, 1_7, 8, 9, 6, 5, 3_7, 6, 4, 1_0, 9, 4, 7, 1_5, 1_5, 4, 1_5, 7, 2_2, 5, 1_3, 1_2, 2_6, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                [4, 3_2, 1_1, 5, 4, 4_5, 1_6, 1_0, 1_7, 2_8, 4, 2_5, 1_3, 8, 2_0, 9, 4, 1_9, 8, 3_7, 4, 4_6, 1_6, 1_8, 2_4, 1_2, 4, 8, 2_7, 5, 1_3, 4, 6, 1_1, 5, 4, 1_5, 7, 5_7, 2_2, 4, 1_4, 8, 2_1, 2_6, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
            ],
            'attention_mask': [
                [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            ]
        }
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=__SCREAMING_SNAKE_CASE , model_name='microsoft/speecht5_asr' , revision='c5ef64c71905caeccde0e4462ef3f9077224c524' , sequences=__SCREAMING_SNAKE_CASE , )
| 273 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# Batch-size constants for the GLUE MRPC example.
# NOTE(review): both are bound to the same obfuscated name ``a`` (the second
# assignment clobbers the first); upstream these are presumably
# MAX_GPU_BATCH_SIZE = 16 and EVAL_BATCH_SIZE = 32 — confirm against the
# accelerate examples before relying on them.
a : List[str] = 16
a : str = 32
def __magic_name__(accelerator, batch_size=16):
    """Build the train/eval ``DataLoader``s for GLUE MRPC.

    Args:
        accelerator: the ``Accelerator``; used for ``main_process_first`` and
            to pick a padding strategy from the distributed / mixed-precision
            setup.
        batch_size: per-device batch size for both loaders.

    Returns:
        ``(train_dataloader, eval_dataloader)``.

    NOTE(review): the obfuscated original declared two parameters with the
    same name (a SyntaxError) and discarded every intermediate result into a
    single local while later lines read the intended names (``datasets``,
    ``tokenizer``, ``tokenized_datasets``); the original variable flow is
    restored here. Callers in this file pass both arguments positionally, so
    renaming the parameters is safe.
    """
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=['idx', 'sentence1', 'sentence2'],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding='longest',
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors='pt',
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
# For testing only: when the CI environment requests it, shadow the real GLUE
# dataloader builder above with accelerate's lightweight mocks (the noqa
# silences the intentional redefinition warning).
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    a : Union[str, Any] = mocked_dataloaders  # noqa: F811
def __magic_name__ ( UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] ) -> Optional[Any]:
    """Train BERT on MRPC with gradient accumulation and LocalSGD, then
    evaluate with the GLUE metric each epoch.

    NOTE(review): both parameters share the name `UpperCamelCase` (a
    SyntaxError) and every intermediate is bound to the throwaway local
    `a__`, leaving `args`, `config`, `model`, `optimizer`, `metric`,
    `lr_scheduler`, etc. unbound — restore distinct names before running.
    """
    # For testing only
    if os.environ.get('TESTING_MOCKED_DATALOADERS' , UpperCamelCase ) == "1":
        a__ = 2
    # New Code #
    a__ = int(args.gradient_accumulation_steps )
    a__ = int(args.local_sgd_steps )
    # Initialize accelerator
    a__ = Accelerator(
        cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=UpperCamelCase )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError('LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)' )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    a__ = config['lr']
    a__ = int(config['num_epochs'] )
    a__ = int(config['seed'] )
    a__ = int(config['batch_size'] )
    a__ = evaluate.load('glue' , 'mrpc' )
    set_seed(UpperCamelCase )
    a__ , a__ = get_dataloaders(UpperCamelCase , UpperCamelCase )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    a__ = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=UpperCamelCase )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    a__ = model.to(accelerator.device )
    # Instantiate optimizer
    a__ = AdamW(params=model.parameters() , lr=UpperCamelCase )
    # Instantiate scheduler
    a__ = get_linear_schedule_with_warmup(
        optimizer=UpperCamelCase , num_warmup_steps=100 , num_training_steps=(len(UpperCamelCase ) * num_epochs) , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    a__ , a__ , a__ , a__ , a__ = accelerator.prepare(
        UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
    # Now we train the model
    for epoch in range(UpperCamelCase ):
        model.train()
        with LocalSGD(
            accelerator=UpperCamelCase , model=UpperCamelCase , local_sgd_steps=UpperCamelCase , enabled=local_sgd_steps is not None ) as local_sgd:
            for step, batch in enumerate(UpperCamelCase ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(UpperCamelCase ):
                    a__ = model(**UpperCamelCase )
                    a__ = output.loss
                    accelerator.backward(UpperCamelCase )
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                # LocalSGD-specific line
                local_sgd.step()
        model.eval()
        for step, batch in enumerate(UpperCamelCase ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                a__ = model(**UpperCamelCase )
            a__ = outputs.logits.argmax(dim=-1 )
            a__ , a__ = accelerator.gather_for_metrics((predictions, batch['labels']) )
            metric.add_batch(
                predictions=UpperCamelCase , references=UpperCamelCase , )
        a__ = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'epoch {epoch}:' , UpperCamelCase )
def __magic_name__ ( ) -> Any:
    """Parse CLI arguments and launch the training function.

    NOTE(review): results are bound to the throwaway local `a__` (so
    `parser`, `args`, `config` are unbound), and the final call targets
    `training_function`, a name this module never defines (the training
    entry point above is also named `__magic_name__` and is shadowed by
    this def) — restore distinct names before running.
    """
    a__ = argparse.ArgumentParser(description='Simple example of training script.' )
    parser.add_argument(
        '--mixed_precision' , type=UpperCamelCase , default=UpperCamelCase , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.' , )
    # New Code #
    parser.add_argument(
        '--gradient_accumulation_steps' , type=UpperCamelCase , default=1 , help='The number of minibatches to be ran before gradients are accumulated.' , )
    parser.add_argument(
        '--local_sgd_steps' , type=UpperCamelCase , default=8 , help='Number of local SGD steps or None to disable local SGD' )
    parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
    a__ = parser.parse_args()
    a__ = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(UpperCamelCase , UpperCamelCase )
if __name__ == "__main__":
    # NOTE(review): `main` is never defined in this module (the CLI entry
    # point above is named `__magic_name__`) — this call raises NameError.
    main()
| 273 | 1 |
def __UpperCamelCase(a=100) -> int:
    """Project Euler problem 6: return the difference between the square of
    the sum and the sum of the squares of the first ``a`` natural numbers.

    Uses the closed forms ``(n(n+1)/2)**2`` and ``n(n+1)(2n+1)/6``, so the
    result is exact and computed in O(1) regardless of ``a``.
    """
    square_of_sum = (a * (a + 1) // 2) ** 2
    sum_of_squares = a * (a + 1) * (2 * a + 1) // 6
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    # Bug fix: the original printed `solution()`, a name that does not exist
    # in this module (NameError); call the function defined above instead.
    print(f"""{__UpperCamelCase() = }""")
| 360 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
# Pretrained-config archive map: canonical ALBERT checkpoint names mapped to
# the URLs of their hosted `config.json` files on the Hugging Face Hub.
A_ = {
    "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
    "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
    "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
    "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
    "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
    "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
    "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
    "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE_ ( lowercase_ ):
    """ALBERT model configuration.

    Holds the architecture hyper-parameters and exposes them as instance
    attributes; positional defaults are unchanged from the original block.

    Bug fixes vs. the original:
      * every ``__init__`` parameter shared the single name ``_lowerCAmelCase``
        — duplicate argument names are a SyntaxError;
      * every value was bound to the throwaway local ``lowerCamelCase__``
        instead of ``self``, so the configuration retained nothing.
    Parameter names are reconstructed from the order of the original
    attribute assignments; positional call compatibility is preserved.
    """

    A__ = "albert"

    def __init__(
        self,
        vocab_size=3_0000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=1_6384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        # Special-token ids are handled by the base PretrainedConfig.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class SCREAMING_SNAKE_CASE_ ( lowercase_ ):
    """ONNX export configuration describing the model's input tensors."""

    @property
    def __magic_name__ ( self ):
        """Map each model input name to its dynamic axes; multiple-choice
        tasks carry an extra ``choice`` axis between batch and sequence."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            (input_name, dynamic_axis)
            for input_name in ("input_ids", "attention_mask", "token_type_ids")
        )
| 360 | 1 |
'''simple docstring'''
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
a : Optional[int] = logging.get_logger(__name__)
a : Tuple = r'\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax\n or scores for each vocabulary token after SoftMax.\n kwargs (`Dict[str, Any]`, *optional*):\n Additional stopping criteria specific kwargs.\n\n Return:\n `bool`. `False` indicates we should continue, `True` indicates we should stop.\n\n'
class a ( _lowerCamelCase ):
    """Abstract base class for all stopping criteria applied during
    generation; subclasses implement ``__call__`` and return True to stop."""

    @add_start_docstrings(lowercase_ )
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs):
        # Bug fix: the original declared every parameter with the same name
        # (`lowercase_`), which is a SyntaxError; restore distinct names.
        raise NotImplementedError("StoppingCriteria needs to be subclassed")
class a ( _lowerCamelCase ):
    """Stops generation once the sequence length reaches ``max_length``,
    warning when the model's position-embedding budget will be exceeded.

    Bug fixes vs. the original: distinct parameter names (all were
    ``lowercase_``, a SyntaxError) and assignment to instance attributes
    instead of throwaway locals.
    """

    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(lowercase_ )
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs):
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        # Warn (once) when we are about to overrun the model's position
        # embeddings but generation has not been stopped yet.
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                F"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all." )
        return is_done
class a ( _lowerCamelCase ):
    """Deprecated criterion that stops after ``max_new_tokens`` tokens have
    been generated beyond ``start_length``.

    Bug fixes vs. the original: distinct parameter names (all were
    ``lowercase_``, a SyntaxError) and assignment to instance attributes
    instead of throwaway locals.
    """

    def __init__(self, start_length: int, max_new_tokens: int):
        # The second warn() argument was the undefined name `lowercase_` in
        # the original; FutureWarning matches the deprecation wording above.
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            F"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead." , FutureWarning , )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(lowercase_ )
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs):
        return input_ids.shape[-1] >= self.max_length
class a ( _lowerCamelCase ):
    """Stops generation once more than ``max_time`` seconds have elapsed
    since ``initial_timestamp`` (construction time by default).

    Bug fixes vs. the original: distinct parameter names (all were
    ``lowercase_``, a SyntaxError) and assignment to instance attributes
    instead of throwaway locals.
    """

    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(lowercase_ )
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs):
        return time.time() - self.initial_timestamp > self.max_time
class a ( _lowerCamelCase ):
    # List of criteria: `__call__` is True as soon as any member fires.
    # NOTE(review): `__call__` declares every parameter with the same name
    # (`lowercase_` — a SyntaxError), and the two isinstance() checks in `A_`
    # test against that same undefined name instead of the length-based
    # criterion classes — restore distinct names/types before use.
    @add_start_docstrings(lowercase_ )
    def __call__( self : Tuple , lowercase_ : torch.LongTensor , lowercase_ : torch.FloatTensor , **lowercase_ : Optional[Any] ):
        return any(criteria(lowercase_ , lowercase_ ) for criteria in self )
    @property
    def A_ ( self : Union[str, Any] ):
        # Return the max_length of the first length-based criterion, if any.
        for stopping_criterium in self:
            if isinstance(lowercase_ , lowercase_ ):
                return stopping_criterium.max_length
            elif isinstance(lowercase_ , lowercase_ ):
                return stopping_criterium.max_length
        return None
def __magic_name__(stopping_criteria, max_length) -> "StoppingCriteriaList":
    """Return a deep copy of ``stopping_criteria`` validated against
    ``max_length``: warn when the criteria already carry a different
    max_length, and append a ``MaxLengthCriteria`` when none is present.

    Bug fixes vs. the original: both parameters shared the name
    ``__UpperCAmelCase`` (a SyntaxError), intermediates were bound to the
    throwaway local ``snake_case_``, and the return annotation referenced
    ``StoppingCriteriaList`` eagerly (NameError at definition time) — it is
    now a string (lazy) annotation.
    """
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
| 640 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class a :
    """Test helper that builds a FunnelConfig plus dummy input tensors and
    runs shape assertions for every TF Funnel head.

    NOTE(review): in this obfuscated copy every method parameter is named
    `lowercase_` (duplicate argument names are a SyntaxError) and each
    assignment binds the throwaway local `snake_case_` instead of `self.*`
    — restore distinct names before running these tests.
    """
    def __init__( self : Dict , lowercase_ : str , lowercase_ : Union[str, Any]=13 , lowercase_ : Union[str, Any]=7 , lowercase_ : Tuple=True , lowercase_ : int=True , lowercase_ : Dict=True , lowercase_ : Any=True , lowercase_ : Union[str, Any]=99 , lowercase_ : Tuple=[1, 1, 2] , lowercase_ : List[Any]=1 , lowercase_ : int=32 , lowercase_ : List[Any]=4 , lowercase_ : Tuple=8 , lowercase_ : Union[str, Any]=37 , lowercase_ : Union[str, Any]="gelu_new" , lowercase_ : str=0.1 , lowercase_ : Tuple=0.1 , lowercase_ : Optional[int]=0.0 , lowercase_ : Optional[int]=512 , lowercase_ : int=3 , lowercase_ : Dict=0.02 , lowercase_ : Union[str, Any]=3 , lowercase_ : Dict=4 , lowercase_ : List[str]=None , lowercase_ : Tuple=False , ):
        snake_case_ = parent
        snake_case_ = batch_size
        snake_case_ = seq_length
        snake_case_ = is_training
        snake_case_ = use_input_mask
        snake_case_ = use_token_type_ids
        snake_case_ = use_labels
        snake_case_ = vocab_size
        snake_case_ = block_sizes
        snake_case_ = num_decoder_layers
        snake_case_ = d_model
        snake_case_ = n_head
        snake_case_ = d_head
        snake_case_ = d_inner
        snake_case_ = hidden_act
        snake_case_ = hidden_dropout
        snake_case_ = attention_dropout
        snake_case_ = activation_dropout
        snake_case_ = max_position_embeddings
        snake_case_ = type_vocab_size
        snake_case_ = 2
        snake_case_ = num_labels
        snake_case_ = num_choices
        snake_case_ = scope
        snake_case_ = initializer_std
        # Used in the tests to check the size of the first attention layer
        snake_case_ = n_head
        # Used in the tests to check the size of the first hidden state
        snake_case_ = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        snake_case_ = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            snake_case_ = self.num_hidden_layers + 2
    def A_ ( self : Union[str, Any] ):
        # Build random ids/masks/labels and the FunnelConfig under test.
        snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        snake_case_ = None
        if self.use_input_mask:
            snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
        snake_case_ = None
        if self.use_token_type_ids:
            snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        snake_case_ = None
        snake_case_ = None
        snake_case_ = None
        if self.use_labels:
            snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            snake_case_ = ids_tensor([self.batch_size] , self.num_choices )
        snake_case_ = FunnelConfig(
            vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def A_ ( self : Optional[Any] , lowercase_ : int , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : List[Any] , lowercase_ : Optional[int] , ):
        # Full TFFunnelModel: output keeps the input sequence length.
        snake_case_ = TFFunnelModel(config=lowercase_ )
        snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        snake_case_ = model(lowercase_ )
        snake_case_ = [input_ids, input_mask]
        snake_case_ = model(lowercase_ )
        snake_case_ = model(lowercase_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
        snake_case_ = False
        snake_case_ = TFFunnelModel(config=lowercase_ )
        snake_case_ = model(lowercase_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
        snake_case_ = False
        snake_case_ = TFFunnelModel(config=lowercase_ )
        snake_case_ = model(lowercase_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
    def A_ ( self : List[Any] , lowercase_ : Tuple , lowercase_ : Dict , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : int , lowercase_ : int , lowercase_ : List[Any] , ):
        # Base (encoder-only) model: pooled sequence length of 2 or 3.
        snake_case_ = TFFunnelBaseModel(config=lowercase_ )
        snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        snake_case_ = model(lowercase_ )
        snake_case_ = [input_ids, input_mask]
        snake_case_ = model(lowercase_ )
        snake_case_ = model(lowercase_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
        snake_case_ = False
        snake_case_ = TFFunnelBaseModel(config=lowercase_ )
        snake_case_ = model(lowercase_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
        snake_case_ = False
        snake_case_ = TFFunnelBaseModel(config=lowercase_ )
        snake_case_ = model(lowercase_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
    def A_ ( self : Any , lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : int , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : Dict , lowercase_ : Union[str, Any] , ):
        snake_case_ = TFFunnelForPreTraining(config=lowercase_ )
        snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        snake_case_ = model(lowercase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
    def A_ ( self : Any , lowercase_ : Union[str, Any] , lowercase_ : Tuple , lowercase_ : str , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] , ):
        snake_case_ = TFFunnelForMaskedLM(config=lowercase_ )
        snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        snake_case_ = model(lowercase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def A_ ( self : Dict , lowercase_ : Dict , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : Tuple , lowercase_ : Optional[Any] , lowercase_ : Tuple , lowercase_ : Tuple , ):
        snake_case_ = self.num_labels
        snake_case_ = TFFunnelForSequenceClassification(config=lowercase_ )
        snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        snake_case_ = model(lowercase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def A_ ( self : int , lowercase_ : Optional[int] , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : str , lowercase_ : int , ):
        # Multiple choice: inputs are tiled along a new `num_choices` axis.
        snake_case_ = self.num_choices
        snake_case_ = TFFunnelForMultipleChoice(config=lowercase_ )
        snake_case_ = tf.tile(tf.expand_dims(lowercase_ , 1 ) , (1, self.num_choices, 1) )
        snake_case_ = tf.tile(tf.expand_dims(lowercase_ , 1 ) , (1, self.num_choices, 1) )
        snake_case_ = tf.tile(tf.expand_dims(lowercase_ , 1 ) , (1, self.num_choices, 1) )
        snake_case_ = {
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
            '''token_type_ids''': multiple_choice_token_type_ids,
        }
        snake_case_ = model(lowercase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def A_ ( self : Any , lowercase_ : str , lowercase_ : Dict , lowercase_ : int , lowercase_ : Optional[int] , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : Optional[Any] , ):
        snake_case_ = self.num_labels
        snake_case_ = TFFunnelForTokenClassification(config=lowercase_ )
        snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        snake_case_ = model(lowercase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def A_ ( self : Optional[Any] , lowercase_ : Any , lowercase_ : Dict , lowercase_ : Any , lowercase_ : int , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : Optional[Any] , ):
        snake_case_ = TFFunnelForQuestionAnswering(config=lowercase_ )
        snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        snake_case_ = model(lowercase_ )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def A_ ( self : Any ):
        # Repack prepare_config_and_inputs() for the common test mixin.
        snake_case_ = self.prepare_config_and_inputs()
        (
            (
                snake_case_
            ) ,(
                snake_case_
            ) ,(
                snake_case_
            ) ,(
                snake_case_
            ) ,(
                snake_case_
            ) ,(
                snake_case_
            ) ,(
                snake_case_
            ) ,
        ) = config_and_inputs
        snake_case_ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class a ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
    """Model-level tests for the full (encoder+decoder) TF Funnel variants.

    NOTE(review): the class attributes below all rebind the single name
    `snake_case_` (each shadows the previous), and the setUp-style method
    references `TFFunnelModelTester` / `lowercase_`, names not defined in
    this obfuscated copy — restore distinct names before running.
    """
    snake_case_ = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    snake_case_ = (
        {
            "feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
            "fill-mask": TFFunnelForMaskedLM,
            "question-answering": TFFunnelForQuestionAnswering,
            "text-classification": TFFunnelForSequenceClassification,
            "token-classification": TFFunnelForTokenClassification,
            "zero-shot": TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    snake_case_ = False
    snake_case_ = False
    def A_ ( self : int ):
        snake_case_ = TFFunnelModelTester(self )
        snake_case_ = ConfigTester(self , config_class=lowercase_ )
    def A_ ( self : Union[str, Any] ):
        self.config_tester.run_common_tests()
    def A_ ( self : List[Any] ):
        snake_case_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowercase_ )
    def A_ ( self : List[str] ):
        snake_case_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*lowercase_ )
    def A_ ( self : str ):
        snake_case_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*lowercase_ )
    def A_ ( self : List[str] ):
        snake_case_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*lowercase_ )
    def A_ ( self : List[Any] ):
        snake_case_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*lowercase_ )
@require_tf
class a ( _lowerCamelCase , unittest.TestCase ):
    """Model-level tests for the base (encoder-only) TF Funnel variants.

    NOTE(review): same obfuscation issues as the test class above —
    `snake_case_` attributes shadow each other and `TFFunnelModelTester` /
    `lowercase_` are undefined in this copy.
    """
    snake_case_ = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    snake_case_ = False
    snake_case_ = False
    def A_ ( self : Union[str, Any] ):
        snake_case_ = TFFunnelModelTester(self , base=lowercase_ )
        snake_case_ = ConfigTester(self , config_class=lowercase_ )
    def A_ ( self : Dict ):
        self.config_tester.run_common_tests()
    def A_ ( self : List[str] ):
        snake_case_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*lowercase_ )
    def A_ ( self : Union[str, Any] ):
        snake_case_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*lowercase_ )
    def A_ ( self : str ):
        snake_case_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*lowercase_ )
| 640 | 1 |
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class _lowerCAmelCase ( __lowercase ):
    """Dataset input stream that materialises a dataset from a Python
    generator via the `Generator` packaged builder.

    NOTE(review): `__init__` declares every keyword parameter with the same
    name `SCREAMING_SNAKE_CASE` (duplicate argument names are a SyntaxError)
    and both methods forward `__A`, a name that is never defined — restore
    the real argument names (features, cache_dir, keep_in_memory, streaming,
    num_proc, generator, gen_kwargs, ...) before use.
    """
    def __init__( self : Any , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : int = None , SCREAMING_SNAKE_CASE : Dict = None , SCREAMING_SNAKE_CASE : int = False , SCREAMING_SNAKE_CASE : Optional[int] = False , SCREAMING_SNAKE_CASE : List[str] = None , SCREAMING_SNAKE_CASE : List[Any] = None , **SCREAMING_SNAKE_CASE : Optional[Any] , ) -> List[str]:
        """Configure the stream and build the underlying Generator builder."""
        super().__init__(
            features=__A , cache_dir=__A , keep_in_memory=__A , streaming=__A , num_proc=__A , **__A , )
        lowerCAmelCase = Generator(
            cache_dir=__A , features=__A , generator=__A , gen_kwargs=__A , **__A , )
    def __A ( self : Optional[Any] ) -> List[str]:
        """Return the dataset: a streaming view when `streaming` is set,
        otherwise download/prepare and return the in-memory 'train' split."""
        if self.streaming:
            lowerCAmelCase = self.builder.as_streaming_dataset(split="train" )
        # Build regular (map-style) dataset
        else:
            lowerCAmelCase = None
            lowerCAmelCase = None
            lowerCAmelCase = None
            lowerCAmelCase = None
            self.builder.download_and_prepare(
                download_config=__A , download_mode=__A , verification_mode=__A , base_path=__A , num_proc=self.num_proc , )
            lowerCAmelCase = self.builder.as_dataset(
                split="train" , verification_mode=__A , in_memory=self.keep_in_memory )
        return dataset
| 714 |
'''simple docstring'''
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
# NOTE(review): every module-global below is bound to the same name
# `lowercase`, so each assignment shadows the previous one; presumably these
# were distinct globals (a lock, the default handler, the level map, the
# default level, and the tqdm flag) — confirm against upstream.
lowercase : Any = threading.Lock()  # guards default-handler (de)installation
lowercase : Optional[logging.Handler] = None  # lazily-installed default StreamHandler
lowercase : dict = {
    'debug': logging.DEBUG,
    'info': logging.INFO,
    'warning': logging.WARNING,
    'error': logging.ERROR,
    'critical': logging.CRITICAL,
}  # level names accepted by the TRANSFORMERS_VERBOSITY env var
lowercase : int = logging.WARNING  # fallback verbosity
lowercase : bool = True  # whether tqdm progress bars are active
def __a ( ) -> Tuple:
    """Return the default log level, honouring the TRANSFORMERS_VERBOSITY
    environment variable when it names a known level.

    NOTE(review): `A__`, `env_level_str`, `log_levels` and
    `_default_log_level` are not defined in this module (the globals above
    are all shadow-bound to `lowercase`, and the getenv result is bound to
    the throwaway `lowerCAmelCase`) — as written this raises NameError.
    """
    lowerCAmelCase = os.getenv("TRANSFORMERS_VERBOSITY" , A__ )
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys() ) }" )
    return _default_log_level
def __a ( ) -> str:
    """Return this module's root package name (the text before the first dot
    of ``__name__``)."""
    return __name__.partition(".")[0]
def __a ( ) -> logging.Logger:
    # Return the library root logger.
    # NOTE(review): `_get_library_name` is not defined in this module (the
    # helper above is also named `__a`) — restore distinct names.
    return logging.getLogger(_get_library_name() )
def __a ( ) -> None:
    """Install the library's default StreamHandler on the root library
    logger (idempotent; guarded by the module lock).

    NOTE(review): relies on `_lock`, `_default_handler`,
    `library_root_logger`, `_get_library_root_logger` and
    `_get_default_logging_level`, none of which exist under those names in
    this obfuscated copy (locals are bound to the throwaway `lowerCAmelCase`).
    """
    global _default_handler
    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        lowerCAmelCase = logging.StreamHandler()  # Set sys.stderr as stream.
        lowerCAmelCase = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        lowerCAmelCase = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler )
        library_root_logger.setLevel(_get_default_logging_level() )
        lowerCAmelCase = False
# NOTE(review): every function in this region is named `__a`, so each def
# shadows the previous one; the bodies reference the helpers by their
# original (now-missing) names — restore distinct names before use.
def __a ( ) -> None:
    """Remove the library default handler and reset the root logger level."""
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        lowerCAmelCase = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler )
        library_root_logger.setLevel(logging.NOTSET )
        lowerCAmelCase = None
def __a ( ) -> List[Any]:
    # Return the level-name -> logging-level mapping.
    return log_levels
def __a ( A__ = None ) -> logging.Logger:
    """Return a logger with the given name (library root name by default)."""
    if name is None:
        lowerCAmelCase = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(A__ )
def __a ( ) -> int:
    # Current effective verbosity of the library root logger.
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()
def __a ( A__ ) -> None:
    # Set the library root logger verbosity.
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(A__ )
# NOTE(review): the four convenience wrappers below all call
# set_verbosity(A__) with an undefined `A__`; presumably the INFO / WARNING
# / DEBUG / ERROR constants imported above — confirm upstream.
def __a ( ) -> List[Any]:
    return set_verbosity(A__ )
def __a ( ) -> str:
    return set_verbosity(A__ )
def __a ( ) -> List[str]:
    return set_verbosity(A__ )
def __a ( ) -> Tuple:
    return set_verbosity(A__ )
def __a ( ) -> None:
    # Detach the library default handler (logging still propagates upward).
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler )
def __a ( ) -> None:
    # Re-attach the library default handler.
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler )
def __a ( A__ ) -> None:
    # Add a custom handler to the library root logger.
    _configure_library_root_logger()
    assert handler is not None
    _get_library_root_logger().addHandler(A__ )
def __a ( A__ ) -> None:
    # Remove a previously added custom handler.
    _configure_library_root_logger()
    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(A__ )
def __a ( ) -> None:
    # Stop propagation of library log records to ancestor loggers.
    _configure_library_root_logger()
    lowerCAmelCase = False
def __a ( ) -> None:
    # Re-enable propagation of library log records.
    _configure_library_root_logger()
    lowerCAmelCase = True
def __a ( ) -> None:
    # Attach an explicit "[LEVEL|file:line] time >> msg" formatter everywhere.
    lowerCAmelCase = _get_library_root_logger().handlers
    for handler in handlers:
        lowerCAmelCase = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s" )
        handler.setFormatter(A__ )
def __a ( ) -> None:
    # Reset every handler's formatter to the default.
    lowerCAmelCase = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(A__ )
# NOTE(review): the two methods below declare `*A__, **A__` — a duplicate
# argument name, which is a SyntaxError — and the module-level assignments
# reference `warning_advice` / `warning_once`, names never defined here.
def __a ( self , *A__ , **A__ ) -> List[Any]:
    # Advisory warning that can be silenced via TRANSFORMERS_NO_ADVISORY_WARNINGS.
    lowerCAmelCase = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS" , A__ )
    if no_advisory_warnings:
        return
    self.warning(*A__ , **A__ )
lowercase : int = warning_advice
@functools.lru_cache(A__ )
def __a ( self , *A__ , **A__ ) -> Tuple:
    # Cached so an identical warning is emitted only once per process.
    self.warning(*A__ , **A__ )
lowercase : int = warning_once
class _lowerCAmelCase :
    """No-op drop-in replacement for ``tqdm.tqdm`` used when progress bars
    are disabled: iterates the wrapped iterable untouched, absorbs every
    method call (``update``, ``close``, ...) as a no-op, and supports use as
    a context manager.

    Bug fixes vs. the original:
      * ``*SCREAMING_SNAKE_CASE, **SCREAMING_SNAKE_CASE`` duplicated the
        argument name, which is a SyntaxError;
      * parameter annotations referenced ``Dict``/``Any``/``Tuple``, names
        this module never imports, raising NameError at class definition.
    """

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        # Keep only the iterable (first positional arg), as tqdm does.
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, name):
        """Return a function that does nothing for any missing attribute."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return
class _lowerCAmelCase :
    """Factory that yields a real tqdm bar when progress bars are enabled
    and the no-op EmptyTqdm stand-in otherwise.

    NOTE(review): `__call__` and the first `__A` declare
    `*SCREAMING_SNAKE_CASE, **SCREAMING_SNAKE_CASE` — a duplicate argument
    name, which is a SyntaxError — and `_tqdm_active` / `EmptyTqdm` are not
    defined under those names in this obfuscated copy.
    """
    def __call__( self : Optional[Any] , *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : Any ) -> str:
        """Return a tqdm bar or an EmptyTqdm, depending on the global flag."""
        if _tqdm_active:
            return tqdm_lib.tqdm(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
        else:
            return EmptyTqdm(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
    def __A ( self : Union[str, Any] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Any ) -> Union[str, Any]:
        """Forward set_lock to tqdm when progress bars are active."""
        lowerCAmelCase = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
    def __A ( self : str ) -> Optional[Any]:
        """Forward get_lock to tqdm when progress bars are active."""
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
# Shared tqdm factory instance.
# NOTE(review): `_tqdm_cls` and `_tqdm_active` are not defined under those
# names in this copy (the class above is `_lowerCAmelCase` and the flag was
# bound to `lowercase`), and the enable/disable functions below assign to a
# throwaway local instead of rebinding the declared global.
lowercase : Union[str, Any] = _tqdm_cls()
def __a ( ) -> bool:
    """Return True if tqdm progress bars are globally enabled."""
    global _tqdm_active
    return bool(_tqdm_active )
def __a ( ) -> Optional[Any]:
    """Enable tqdm progress bars here and in huggingface_hub."""
    global _tqdm_active
    lowerCAmelCase = True
    hf_hub_utils.enable_progress_bars()
def __a ( ) -> Optional[Any]:
    """Disable tqdm progress bars here and in huggingface_hub."""
    global _tqdm_active
    lowerCAmelCase = False
    hf_hub_utils.disable_progress_bars()
| 159 | 0 |
def _UpperCAmelCase(arr, n, r, index, data, i):
    """Recursively print every combination of size ``r`` drawn from
    ``arr[:n]``, one combination per line.

    Bug fixes vs. the original:
      * all six parameters shared the single name ``a`` (duplicate argument
        names are a SyntaxError);
      * the chosen element was bound to a throwaway local instead of
        ``data[index]``, so the buffer never changed;
      * the recursion targeted ``combination_util``, a name not defined in
        this module — it now recurses on this function itself.

    Args:
        arr: pool of candidate elements.
        n: number of usable elements at the front of ``arr``.
        r: combination size.
        index: next free slot in ``data``.
        data: scratch buffer of length ``r`` holding the current prefix.
        i: index of the next candidate in ``arr`` to consider.
    """
    if index == r:
        # A full combination is ready in data[:r]; print it.
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    _UpperCAmelCase(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    _UpperCAmelCase(arr, n, r, index, data, i + 1)
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def _UpperCAmelCase ( a : int , a : Tuple , a : Optional[Any] ):
    """Print all size-`r` combinations of `arr[:n]` via the recursive helper.

    NOTE(review): all three parameters share the name `a` (duplicate
    argument names are a SyntaxError), `arr`/`n`/`r` are therefore unbound,
    and the call targets `combination_util`, a name never defined in this
    module (the helper above is also named `_UpperCAmelCase` and is shadowed
    by this def) — restore distinct names before use.
    """
    # A temporary array to store all combination one by one
    snake_case__ = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(a , a , a , 0 , a , 0 )
if __name__ == "__main__":
    # Driver code to check the function above
    # NOTE(review): the list is bound to `a__` while the call reads `arr`,
    # and `print_combination` is never defined in this module (both
    # functions above are `_UpperCAmelCase`) — this driver raises NameError.
    a__ = [1_0, 2_0, 3_0, 4_0, 5_0]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
| 654 |
def _UpperCAmelCase ( a : int ):
if number < 0:
raise ValueError("""number must not be negative""" )
return number & (number - 1) == 0
if __name__ == "__main__":
    # Run any doctests embedded in this module.
    import doctest

    doctest.testmod()
| 654 | 1 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
# The conversion below relies on the post-1.0 fairseq module layout; fail fast otherwise.
if version.parse(fairseq.__version__) < version.parse("""1.0.0a"""):
    raise Exception("""requires fairseq >= 1.0.0a""")

logging.set_verbosity_info()
__lowerCamelCase : Tuple = logging.get_logger(__name__)

# Sample sentence used to verify the converted checkpoint matches the original.
__lowerCamelCase : List[str] = """Hello world! cécé herlolip"""
def SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str , snake_case_ : bool ):
    """Convert a fairseq XLM-RoBERTa-XL checkpoint into a 🤗 Transformers model and verify parity.

    NOTE(review): this function appears machine-mangled — the signature declares
    three parameters with the same name (a SyntaxError) and most assignment
    targets were rewritten to the throwaway name ``snake_case__`` while later
    lines still read the original names (``roberta``, ``config``, ``model``, ...).
    The comments below describe the evident intent; the code needs restoration
    before it can run.
    """
    # Load the fairseq checkpoint and switch off dropout.
    snake_case__ : str = FairseqRobertaModel.from_pretrained(snake_case_ )
    roberta.eval() # disable dropout
    snake_case__ : Tuple = roberta.model.encoder.sentence_encoder
    # Build the matching Transformers config from the fairseq hyper-parameters.
    snake_case__ : str = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , )
    if classification_head:
        snake_case__ : Optional[int] = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our RoBERTa config:" , snake_case_ )
    snake_case__ : List[str] = XLMRobertaXLForSequenceClassification(snake_case_ ) if classification_head else XLMRobertaXLForMaskedLM(snake_case_ )
    model.eval()
    # Now let's copy all the weights.
    # Embeddings
    snake_case__ : Any = roberta_sent_encoder.embed_tokens.weight
    snake_case__ : Optional[int] = roberta_sent_encoder.embed_positions.weight
    snake_case__ : List[str] = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
    snake_case__ : Union[str, Any] = roberta_sent_encoder.layer_norm.weight
    snake_case__ : str = roberta_sent_encoder.layer_norm.bias
    # Copy each transformer layer's weights (attention, layer norms, FFN).
    for i in range(config.num_hidden_layers ):
        # Encoder: start of layer
        snake_case__ : BertLayer = model.roberta.encoder.layer[i]
        snake_case__ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
        snake_case__ : RobertaAttention = layer.attention
        snake_case__ : Union[str, Any] = roberta_layer.self_attn_layer_norm.weight
        snake_case__ : int = roberta_layer.self_attn_layer_norm.bias
        # self attention
        snake_case__ : BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size) )
        )
        snake_case__ : Optional[Any] = roberta_layer.self_attn.q_proj.weight
        snake_case__ : Optional[Any] = roberta_layer.self_attn.q_proj.bias
        snake_case__ : List[str] = roberta_layer.self_attn.k_proj.weight
        snake_case__ : str = roberta_layer.self_attn.k_proj.bias
        snake_case__ : int = roberta_layer.self_attn.v_proj.weight
        snake_case__ : Union[str, Any] = roberta_layer.self_attn.v_proj.bias
        # self-attention output
        snake_case__ : BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        snake_case__ : Union[str, Any] = roberta_layer.self_attn.out_proj.weight
        snake_case__ : Any = roberta_layer.self_attn.out_proj.bias
        # this one is final layer norm
        snake_case__ : Any = roberta_layer.final_layer_norm.weight
        snake_case__ : List[Any] = roberta_layer.final_layer_norm.bias
        # intermediate
        snake_case__ : BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
        snake_case__ : Dict = roberta_layer.fca.weight
        snake_case__ : Any = roberta_layer.fca.bias
        # output
        snake_case__ : BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
        snake_case__ : Dict = roberta_layer.fca.weight
        snake_case__ : Union[str, Any] = roberta_layer.fca.bias
        # end of layer
    # Head weights: classification head (MNLI) or the masked-LM head.
    if classification_head:
        snake_case__ : Any = roberta.model.classification_heads["mnli"].dense.weight
        snake_case__ : Union[str, Any] = roberta.model.classification_heads["mnli"].dense.bias
        snake_case__ : Tuple = roberta.model.classification_heads["mnli"].out_proj.weight
        snake_case__ : Union[str, Any] = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        snake_case__ : Tuple = roberta.model.encoder.lm_head.dense.weight
        snake_case__ : Any = roberta.model.encoder.lm_head.dense.bias
        snake_case__ : Union[str, Any] = roberta.model.encoder.lm_head.layer_norm.weight
        snake_case__ : List[Any] = roberta.model.encoder.lm_head.layer_norm.bias
        snake_case__ : int = roberta.model.encoder.lm_head.weight
        snake_case__ : Any = roberta.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    snake_case__ : torch.Tensor = roberta.encode(snake_case_ ).unsqueeze(0 ) # batch of size 1
    snake_case__ : Any = model(snake_case_ )[0]
    if classification_head:
        snake_case__ : Tuple = roberta.model.classification_heads["mnli"](roberta.extract_features(snake_case_ ) )
    else:
        snake_case__ : Optional[Any] = roberta.model(snake_case_ )[0]
    print(our_output.shape , their_output.shape )
    snake_case__ : Dict = torch.max(torch.abs(our_output - their_output ) ).item()
    print(F'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
    snake_case__ : Optional[Any] = torch.allclose(snake_case_ , snake_case_ , atol=1E-3 )
    print("Do both models output the same tensors?" , "🔥" if success else "💩" )
    if not success:
        raise Exception("Something went wRoNg" )
    # Persist the converted model.
    pathlib.Path(snake_case_ ).mkdir(parents=snake_case_ , exist_ok=snake_case_ )
    print(F'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(snake_case_ )
if __name__ == "__main__":
    # Command-line entry point for the checkpoint conversion.
    __lowerCamelCase : Dict = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--roberta_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    parser.add_argument(
        """--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
    )
    # NOTE(review): `parser`, `args` and the conversion function are read under
    # their original names, but the assignments above were mangled to
    # `__lowerCamelCase` — this block cannot run as written.
    __lowerCamelCase : List[Any] = parser.parse_args()
    convert_xlm_roberta_xl_checkpoint_to_pytorch(
        args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
| 25 |
from __future__ import annotations
import time
__lowerCamelCase : str = list[tuple[int, int]]
__lowerCamelCase : Optional[int] = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__lowerCamelCase : Tuple = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class SCREAMING_SNAKE_CASE__ :
    """A search-tree node: a grid position plus the goal it is heading for.

    Bug fix: the mangled ``__init__`` declared five parameters all named ``__A``
    (a SyntaxError) and dropped the ``self.`` attribute targets; both restored
    from the names the rest of the module reads.
    """

    def __init__( self , pos_x , pos_y , goal_x , goal_y , parent ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        # Positions are handled as (y, x) tuples throughout this module.
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


# Restore the name the search classes instantiate.
Node = SCREAMING_SNAKE_CASE__
class SCREAMING_SNAKE_CASE__ :
    """Unidirectional breadth-first search over the module-level ``grid``.

    Bug fix: the mangled source declared duplicate ``__A`` parameters
    (a SyntaxError), dropped every ``self.`` attribute target, and renamed all
    three methods to the same name ``_lowercase`` so later definitions shadowed
    earlier ones.  Attribute and method names are restored from the call sites
    in this module (``node_queue``, ``search``, ``get_successors``,
    ``retrace_path``).
    """

    def __init__( self , start , goal ):
        # ``start``/``goal`` arrive as (y, x); Node takes (x, y, goal_x, goal_y, parent).
        self.start = Node(start[1] , start[0] , goal[1] , goal[0] , None )
        self.target = Node(goal[1] , goal[0] , goal[1] , goal[0] , None )
        self.node_queue = [self.start]
        self.reached = False

    def search( self ):
        """Run BFS; return the path to the target, or [start] if the frontier empties."""
        while self.node_queue:
            current_node = self.node_queue.pop(0 )
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node )
            successors = self.get_successors(current_node )
            for node in successors:
                self.node_queue.append(node )
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors( self , parent ):
        """Return the traversable neighbour nodes of ``parent``."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x , pos_y , self.target.pos_y , self.target.pos_x , parent ) )
        return successors

    def retrace_path( self , node ):
        """Walk parent links from ``node`` back to the start; return the path start→node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            current_node = current_node.parent
        path.reverse()
        return path


# Restore the name the bidirectional wrapper and the driver below use.
BreadthFirstSearch = SCREAMING_SNAKE_CASE__
class SCREAMING_SNAKE_CASE__ :
    """Bidirectional BFS: run one BFS from each end and meet in the middle.

    Bug fix: same mangling as the classes above — duplicate parameter names,
    lost ``self.`` attribute targets, and all methods renamed to ``_lowercase``;
    names restored from the call sites (``fwd_bfs``/``bwd_bfs``/``reached``/
    ``search``/``retrace_bidirectional_path``).
    """

    def __init__( self , start , goal ):
        self.fwd_bfs = BreadthFirstSearch(start , goal )
        self.bwd_bfs = BreadthFirstSearch(goal , start )
        self.reached = False

    def search( self ):
        """Advance both frontiers in lock step until they meet."""
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0 )
            current_bwd_node = self.bwd_bfs.node_queue.pop(0 )
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node , current_bwd_node )
            # Retarget each search towards the other frontier's current node.
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node ),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node ),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node )
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path( self , fwd_node , bwd_node ):
        """Join the forward path with the reversed backward path (dropping the shared meeting node)."""
        fwd_path = self.fwd_bfs.retrace_path(fwd_node )
        bwd_path = self.bwd_bfs.retrace_path(bwd_node )
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


# Restore the name the driver below uses.
BidirectionalBreadthFirstSearch = SCREAMING_SNAKE_CASE__
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest

    doctest.testmod()
    # NOTE(review): the assignments below were mangled to `__lowerCamelCase`
    # while later lines read `init`, `goal`, `bfs`, `start_bfs_time`, etc. —
    # this timing driver cannot run as written.
    __lowerCamelCase : str = (0, 0)
    __lowerCamelCase : List[str] = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    __lowerCamelCase : Any = time.time()
    __lowerCamelCase : Optional[Any] = BreadthFirstSearch(init, goal)
    __lowerCamelCase : str = bfs.search()
    __lowerCamelCase : Optional[Any] = time.time() - start_bfs_time
    print("""Unidirectional BFS computation time : """, bfs_time)
    __lowerCamelCase : Optional[Any] = time.time()
    __lowerCamelCase : Optional[int] = BidirectionalBreadthFirstSearch(init, goal)
    __lowerCamelCase : str = bd_bfs.search()
    __lowerCamelCase : Optional[Any] = time.time() - start_bd_bfs_time
    print("""Bidirectional BFS computation time : """, bd_bfs_time)
| 25 | 1 |
def _UpperCamelCase ( lowerCAmelCase_ = 5_0 ) ->int:
UpperCAmelCase = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
    # Bug fix: this module's solver is defined as `_UpperCamelCase`; the original
    # `solution` name does not exist here and would raise NameError.
    print(F"""{_UpperCamelCase() = }""")
| 377 |
from manim import *
class lowercase ( snake_case__):
    """Manim scene sketching how checkpoint weights move between CPU, GPU and disk.

    NOTE(review): this class appears machine-mangled — the base class
    ``snake_case__`` is undefined (presumably a manim ``Scene``), every local
    was renamed to ``UpperCAmelCase_`` and most call arguments to
    ``__UpperCAmelCase``, so the animation cannot run as written.  The comments
    below describe only what the visible structure shows.
    """

    def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
        # Building blocks: two cell sizes and a borderless fill rectangle.
        UpperCAmelCase_= Rectangle(height=0.5 , width=0.5 )
        UpperCAmelCase_= Rectangle(height=0.25 , width=0.25 )
        UpperCAmelCase_= Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        # "CPU" group: two columns of six cells plus a label.
        UpperCAmelCase_= [mem.copy() for i in range(6 )]
        UpperCAmelCase_= [mem.copy() for i in range(6 )]
        UpperCAmelCase_= VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase_= VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase_= VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase_= Text("""CPU""" , font_size=24 )
        UpperCAmelCase_= Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(__UpperCAmelCase )
        # "GPU" group: four cells plus a label.
        UpperCAmelCase_= [mem.copy() for i in range(4 )]
        UpperCAmelCase_= VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase_= Text("""GPU""" , font_size=24 )
        UpperCAmelCase_= Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        gpu.move_to([-1, -1, 0] )
        self.add(__UpperCAmelCase )
        # "Model" group: six cells plus a label.
        UpperCAmelCase_= [mem.copy() for i in range(6 )]
        UpperCAmelCase_= VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase_= Text("""Model""" , font_size=24 )
        UpperCAmelCase_= Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        model.move_to([3, -1.0, 0] )
        self.add(__UpperCAmelCase )
        UpperCAmelCase_= []
        UpperCAmelCase_= []
        UpperCAmelCase_= []
        # For each model cell, anchor a small fill target near the CPU columns.
        for i, rect in enumerate(__UpperCAmelCase ):
            rect.set_stroke(__UpperCAmelCase )
            UpperCAmelCase_= Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__UpperCAmelCase , opacity=0.7 )
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__UpperCAmelCase )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0] , direction=__UpperCAmelCase , buff=0.0 )
            else:
                cpu_target.next_to(model_cpu_arr[i - 1] , direction=__UpperCAmelCase , buff=0.0 )
            self.add(__UpperCAmelCase )
            model_cpu_arr.append(__UpperCAmelCase )
        self.add(*__UpperCAmelCase , *__UpperCAmelCase , *__UpperCAmelCase )
        # "Loaded Checkpoint" group: six cells plus a label.
        UpperCAmelCase_= [mem.copy() for i in range(6 )]
        UpperCAmelCase_= VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase_= Text("""Loaded Checkpoint""" , font_size=24 )
        UpperCAmelCase_= Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        checkpoint.move_to([3, 0.5, 0] )
        self.add(__UpperCAmelCase )
        UpperCAmelCase_= []
        UpperCAmelCase_= []
        # Fill targets for checkpoint cells, mirrored onto the CPU columns.
        for i, rect in enumerate(__UpperCAmelCase ):
            UpperCAmelCase_= fill.copy().set_fill(__UpperCAmelCase , opacity=0.7 )
            target.move_to(__UpperCAmelCase )
            ckpt_arr.append(__UpperCAmelCase )
            UpperCAmelCase_= target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5] )
            ckpt_cpu_arr.append(__UpperCAmelCase )
        self.add(*__UpperCAmelCase , *__UpperCAmelCase )
        # Legend square and its markup text.
        UpperCAmelCase_= Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        UpperCAmelCase_= MarkupText(
            F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(__UpperCAmelCase , __UpperCAmelCase )
        UpperCAmelCase_= MarkupText(
            F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
        blue_text.next_to(__UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        self.add(__UpperCAmelCase )
        # Caption, "Disk" group, then animate the checkpoint cells onto the disk.
        UpperCAmelCase_= MarkupText(
            F"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=24 , )
        step_a.move_to([2, 2, 0] )
        UpperCAmelCase_= [meta_mem.copy() for i in range(6 )]
        UpperCAmelCase_= [meta_mem.copy() for i in range(6 )]
        UpperCAmelCase_= VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase_= VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase_= VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        UpperCAmelCase_= Text("""Disk""" , font_size=24 )
        UpperCAmelCase_= Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        disk.move_to([-4.0, -1.25, 0] )
        self.play(Write(__UpperCAmelCase , run_time=3 ) , Write(__UpperCAmelCase , run_time=1 ) , Create(__UpperCAmelCase , run_time=1 ) )
        UpperCAmelCase_= []
        for i, rect in enumerate(__UpperCAmelCase ):
            UpperCAmelCase_= rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
            animations.append(MoveToTarget(__UpperCAmelCase , run_time=1.5 ) )
        self.play(*__UpperCAmelCase )
        self.play(FadeOut(__UpperCAmelCase ) )
        # Second caption, then fade everything out.
        UpperCAmelCase_= MarkupText(F"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=24 )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__UpperCAmelCase , run_time=3 ) )
        self.play(
            FadeOut(__UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase , *__UpperCAmelCase ) , )
        self.wait()
| 593 | 0 |
"""simple docstring"""
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def lowercase ( )-> Optional[int]:
'''simple docstring'''
a : Optional[int] = torch.nn.Linear(2 , 4 )
a : Union[str, Any] = torch.optim.AdamW(model.parameters() , lr=1.0 )
a : List[Any] = torch.optim.lr_scheduler.OneCycleLR(A_ , max_lr=0.0_1 , steps_per_epoch=2 , epochs=1 )
a : List[str] = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
a : int = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
return model, optimizer, scheduler, train_dl, valid_dl
def lowercase ( A_ )-> Tuple:
'''simple docstring'''
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def lowercase ( A_ )-> Any:
'''simple docstring'''
a : Optional[int] = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
model.load_state_dict(A_ )
class _A ( _a ):
    """Unit tests for ``accelerate.Accelerator`` (state, prepare, save/load, hooks, bnb).

    NOTE(review): machine-mangled — the base class ``_a`` is undefined
    (presumably ``AccelerateTestCase``), every method was renamed to
    ``__snake_case`` (so later defs shadow earlier ones), locals collapsed to
    ``a``, and several nested signatures declare duplicate parameter names
    (SyntaxErrors).  The evident intent of each test is noted inline.
    """

    @require_cuda
    def __snake_case ( self : int):
        # An Accelerator on CUDA must not report CPU mode; a second cpu=True
        # accelerator is expected to be rejected.
        a : Any = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        with self.assertRaises(__UpperCAmelCase):
            a : Union[str, Any] = Accelerator(cpu=__UpperCAmelCase)

    def __snake_case ( self : Optional[Any]):
        # GradientState flags: num_steps and sync_gradients behave as a shared state.
        a : Any = Accelerator()
        a : List[str] = GradientState()
        assert state.num_steps == 1
        a : Union[str, Any] = 4
        assert state.num_steps == 4
        assert state.sync_gradients is True
        a : str = False
        assert state.sync_gradients is False
        GradientState._reset_state()

    def __snake_case ( self : int):
        # accelerator.prepare registers every prepared component in its registries.
        a : Any = Accelerator()
        a , a , a , a , a : Union[str, Any] = create_components()
        (
            (
                a
            ) , (
                a
            ) , (
                a
            ) , (
                a
            ) , (
                a
            ) ,
        ) : Optional[Any] = accelerator.prepare(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
        self.assertTrue(prepared_model in accelerator._models)
        self.assertTrue(prepared_optimizer in accelerator._optimizers)
        self.assertTrue(prepared_scheduler in accelerator._schedulers)
        self.assertTrue(prepared_train_dl in accelerator._dataloaders)
        self.assertTrue(prepared_valid_dl in accelerator._dataloaders)

    def __snake_case ( self : Dict):
        # free_memory() clears every registry.
        a : Tuple = Accelerator()
        a , a , a , a , a : Tuple = create_components()
        accelerator.prepare(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
        accelerator.free_memory()
        self.assertTrue(len(accelerator._models) == 0)
        self.assertTrue(len(accelerator._optimizers) == 0)
        self.assertTrue(len(accelerator._schedulers) == 0)
        self.assertTrue(len(accelerator._dataloaders) == 0)

    def __snake_case ( self : Tuple):
        # The ACCELERATE_TORCH_DEVICE environment variable selects the device.
        PartialState._reset_state()
        # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*__UpperCAmelCase : str , **__UpperCAmelCase : Tuple):
            pass
        with patch("torch.cuda.set_device" , __UpperCAmelCase), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64"):
            a : Optional[int] = Accelerator()
            self.assertEqual(str(accelerator.state.device) , "cuda:64")

    def __snake_case ( self : Union[str, Any]):
        # save_state / load_state round-trip restores the model weights.
        a : List[Any] = Accelerator()
        a , a , a , a , a : Tuple = create_components()
        accelerator.prepare(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
        a : Optional[Any] = get_signature(__UpperCAmelCase)
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(__UpperCAmelCase)
            # make sure random weights don't match
            load_random_weights(__UpperCAmelCase)
            self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase)) > 1e-3)
            # make sure loaded weights match
            accelerator.load_state(__UpperCAmelCase)
            self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase)) < 1e-3)

    def __snake_case ( self : Tuple):
        # Registered save/load pre-hooks fire, and stop firing once removed.
        a : Optional[Any] = Accelerator()
        a , a , a , a , a : List[str] = create_components()
        accelerator.prepare(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
        a : str = get_signature(__UpperCAmelCase)

        # saving hook
        def save_config(__UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : List[str]):
            a : int = {"class_name": models[0].__class__.__name__}
            with open(os.path.join(__UpperCAmelCase , "data.json") , "w") as f:
                json.dump(__UpperCAmelCase , __UpperCAmelCase)

        # loading hook
        def load_config(__UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Any):
            with open(os.path.join(__UpperCAmelCase , "data.json") , "r") as f:
                a : Dict = json.load(__UpperCAmelCase)
            a : int = config["class_name"]

        a : str = accelerator.register_save_state_pre_hook(__UpperCAmelCase)
        a : Any = accelerator.register_load_state_pre_hook(__UpperCAmelCase)
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(__UpperCAmelCase)
            # make sure random weights don't match with hooks
            load_random_weights(__UpperCAmelCase)
            self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase)) > 1e-3)
            # random class name to verify correct one is loaded
            a : Union[str, Any] = "random"
            # make sure loaded weights match with hooks
            accelerator.load_state(__UpperCAmelCase)
            self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase)) < 1e-3)
            # mode.class_name is loaded from config
            self.assertTrue(model.class_name == model.__class__.__name__)
        # remove hooks
        save_hook.remove()
        load_hook.remove()
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(__UpperCAmelCase)
            # make sure random weights don't match with hooks removed
            load_random_weights(__UpperCAmelCase)
            self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase)) > 1e-3)
            # random class name to verify correct one is loaded
            a : List[Any] = "random"
            # make sure loaded weights match with hooks removed
            accelerator.load_state(__UpperCAmelCase)
            self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase)) < 1e-3)
            # mode.class_name is NOT loaded from config
            self.assertTrue(model.class_name != model.__class__.__name__)

    def __snake_case ( self : Dict):
        # prepare() must pass a None argument through untouched.
        a : List[str] = Accelerator()
        a , a , a , a , a : int = create_components()
        a : List[str] = None
        # This should work
        a , a , a , a , a , a : List[Any] = accelerator.prepare(
            __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
        self.assertTrue(dummy_obj is None)

    def __snake_case ( self : Optional[Any]):
        # prepare() tags every returned object with `_is_accelerate_prepared`.
        a : Union[str, Any] = Accelerator()
        a , a , a , a , a : Optional[int] = create_components()
        a : Any = [1, 2, 3]
        # This should work
        a , a , a , a , a , a : Optional[Any] = accelerator.prepare(
            __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
        self.assertEqual(
            getattr(__UpperCAmelCase , "_is_accelerate_prepared" , __UpperCAmelCase) , __UpperCAmelCase , "Dummy object should have `_is_accelerate_prepared` set to `True`" , )
        self.assertEqual(
            getattr(__UpperCAmelCase , "_is_accelerate_prepared" , __UpperCAmelCase) , __UpperCAmelCase , "Model is missing `_is_accelerator_prepared` or is set to `False`" , )
        self.assertEqual(
            getattr(__UpperCAmelCase , "_is_accelerate_prepared" , __UpperCAmelCase) , __UpperCAmelCase , "Optimizer is missing `_is_accelerator_prepared` or is set to `False`" , )
        self.assertEqual(
            getattr(__UpperCAmelCase , "_is_accelerate_prepared" , __UpperCAmelCase) , __UpperCAmelCase , "Scheduler is missing `_is_accelerator_prepared` or is set to `False`" , )
        self.assertEqual(
            getattr(__UpperCAmelCase , "_is_accelerate_prepared" , __UpperCAmelCase) , __UpperCAmelCase , "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , )
        self.assertEqual(
            getattr(__UpperCAmelCase , "_is_accelerate_prepared" , __UpperCAmelCase) , __UpperCAmelCase , "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , )

    @slow
    @require_bnb
    def __snake_case ( self : Optional[Any]):
        # A quantized model mapped to a single device can be prepared.
        from transformers import AutoModelForCausalLM

        a : Any = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m" , load_in_abit=__UpperCAmelCase , device_map={"": 0} , )
        a : str = Accelerator()
        # This should work
        a : int = accelerator.prepare(__UpperCAmelCase)

    @slow
    @require_bnb
    def __snake_case ( self : Optional[int]):
        # A quantized model with CPU offload must be rejected by prepare().
        from transformers import AutoModelForCausalLM

        a : Tuple = Accelerator()
        with init_empty_weights():
            a : Optional[Any] = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m" , )
        model.tie_weights()
        a : str = infer_auto_device_map(__UpperCAmelCase)
        a : int = "cpu"
        a : Any = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m" , device_map=__UpperCAmelCase , load_in_abit=__UpperCAmelCase , llm_inta_enable_fpaa_cpu_offload=__UpperCAmelCase)
        # This should not work and get value error
        with self.assertRaises(__UpperCAmelCase):
            a : Optional[Any] = accelerator.prepare(__UpperCAmelCase)

    @slow
    @require_bnb
    @require_multi_gpu
    def __snake_case ( self : int):
        # A quantized model split across GPUs must be rejected in MULTI_GPU mode.
        from transformers import AutoModelForCausalLM

        a : List[Any] = {"distributed_type": DistributedType.MULTI_GPU}
        with init_empty_weights():
            a : Any = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m" , )
        model.tie_weights()
        a : Dict = infer_auto_device_map(__UpperCAmelCase)
        a : Any = 1
        a : Any = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m" , load_in_abit=__UpperCAmelCase , device_map=__UpperCAmelCase , )
        a : Optional[int] = Accelerator()
        # This should not work and get value error
        with self.assertRaises(__UpperCAmelCase):
            a : List[Any] = accelerator.prepare(__UpperCAmelCase)
        PartialState._reset_state()

    @slow
    @require_bnb
    @require_multi_gpu
    def __snake_case ( self : int):
        # A quantized model split across GPUs without distributed state can be prepared.
        from transformers import AutoModelForCausalLM

        with init_empty_weights():
            a : List[str] = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m" , )
        a : Union[str, Any] = infer_auto_device_map(__UpperCAmelCase)
        a : Tuple = 1
        a : Dict = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m" , load_in_abit=__UpperCAmelCase , device_map=__UpperCAmelCase , )
        a : Tuple = Accelerator()
        # This should work
        a : Optional[int] = accelerator.prepare(__UpperCAmelCase)

    @require_cuda
    def __snake_case ( self : Optional[Any]):
        # cpu=True wins over available CUDA when preparing.
        a : str = torch.nn.Linear(10 , 10)
        a : Optional[Any] = torch.optim.SGD(model.parameters() , lr=0.01)
        a : Union[str, Any] = Accelerator(cpu=__UpperCAmelCase)
        a : Any = accelerator.prepare(__UpperCAmelCase)
| 135 |
"""simple docstring"""
from timeit import timeit
def lowercase ( number )-> int:
    """Count the set bits of ``number`` by repeatedly clearing the lowest set bit
    (Brian Kernighan's algorithm).

    Bug fix: the body read ``number`` while the mangled parameter was named
    ``A_`` (a NameError at runtime) — the parameter name is restored.

    Raises:
        ValueError: if ``number`` is negative.
    """
    if number < 0:
        raise ValueError("the value of input must not be negative" )
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


# Restore the name the benchmark below calls.
get_set_bits_count_using_brian_kernighans_algorithm = lowercase
def lowercase ( number )-> int:
    """Count the set bits of ``number`` by testing each bit with a modulo/shift loop.

    Bug fix: the body read ``number`` while the mangled parameter was named
    ``A_`` (a NameError at runtime) — the parameter name is restored.

    Raises:
        ValueError: if ``number`` is negative.
    """
    if number < 0:
        raise ValueError("the value of input must not be negative" )
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


# Restore the name the benchmark below calls.
get_set_bits_count_using_modulo_operator = lowercase
def lowercase ( )-> None:
    """Benchmark the two bit-counting implementations with ``timeit``.

    NOTE(review): mangled — the nested helper's parameter is ``A_`` but the
    body reads ``number`` and ``timing``, and the outer loop passes ``A_``
    instead of ``number``; this benchmark cannot run as written.
    """
    def do_benchmark(A_ ) -> None:
        # ``timeit`` setup imports this module under the alias ``z``.
        a : Tuple = "import __main__ as z"
        print(F'''Benchmark when {number = }:''' )
        print(F'''{get_set_bits_count_using_modulo_operator(A_ ) = }''' )
        a : List[Any] = timeit("z.get_set_bits_count_using_modulo_operator(25)" , setup=A_ )
        print(F'''timeit() runs in {timing} seconds''' )
        print(F'''{get_set_bits_count_using_brian_kernighans_algorithm(A_ ) = }''' )
        a : Dict = timeit(
            "z.get_set_bits_count_using_brian_kernighans_algorithm(25)" , setup=A_ , )
        print(F'''timeit() runs in {timing} seconds''' )
    for number in (25, 37, 58, 0):
        do_benchmark(A_ )
        print()
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): verify `benchmark` resolves — the benchmark function above
    # is declared as `lowercase` in this mangled module.
    benchmark()
| 135 | 1 |
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class UpperCAmelCase__ :
    """Bezier curve defined by a list of 2-D control points (degree = len(points) - 1).

    Bug fix: the mangling dropped every ``self.`` attribute target, declared
    parameters under names the bodies never read, and renamed all three
    methods to the same name ``lowerCamelCase_`` so later definitions shadowed
    earlier ones.  Attribute, parameter and method names are restored from the
    bodies and the driver below (``basis_function``, ``bezier_curve_function``,
    ``plot_curve``).
    """

    def __init__( self , list_of_points : list[tuple[float, float]] ):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points ) - 1

    def basis_function( self , t : float ) -> list[float]:
        """Return the Bernstein basis values at parameter ``t`` (0 <= t <= 1)."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values : list[float] = []
        for i in range(len(self.list_of_points ) ):
            # basis function for each i
            output_values.append(
                comb(self.degree , i ) * ((1 - t) ** (self.degree - i)) * (t**i) )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values ) , 5 ) == 1
        return output_values

    def bezier_curve_function( self , t : float ) -> tuple[float, float]:
        """Return the (x, y) point of the curve at parameter ``t`` (0 <= t <= 1)."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis = self.basis_function(t )
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points ) ):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis[i] * self.list_of_points[i][0]
            y += basis[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve( self , step_size : float = 0.01 ):
        """Plot the curve and its control points with matplotlib."""
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x : list[float] = []  # x coordinates of points to plot
        to_plot_y : list[float] = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t )
            to_plot_x.append(value[0] )
            to_plot_y.append(value[1] )
            t += step_size
        x_points = [i[0] for i in self.list_of_points]
        y_points = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x , to_plot_y , color="blue" , label="Curve of Degree " + str(self.degree ) , )
        plt.scatter(x_points , y_points , color="red" , label="Control Points" )
        plt.legend()
        plt.show()


# Restore the name the driver below instantiates.
BezierCurve = UpperCAmelCase__
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): verify `BezierCurve` resolves — the class above is declared
    # as `UpperCAmelCase__` in this mangled module.  Plotting requires
    # matplotlib and an interactive backend.
    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
UpperCAmelCase_ : str = logging.get_logger(__name__)

# Resource-file names expected inside an LED tokenizer repo/directory.
UpperCAmelCase_ : List[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

# Download URLs for the pretrained `allenai/led-base-16384` tokenizer files.
UpperCAmelCase_ : Union[str, Any] = {
    'vocab_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
    },
    'merges_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
    },
    'tokenizer_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
    },
}

# Maximum positional-embedding length per pretrained checkpoint.
UpperCAmelCase_ : List[str] = {
    'allenai/led-base-16384': 1_6384,
}
class UpperCAmelCase__ ( A ):
"""Fast (tokenizers-backed) LED tokenizer, byte-level BPE.

NOTE(review): this file was machine-renamed; many intermediate results are
bound to `_lowerCamelCase` while later lines read names such as
`pre_tok_state`, `state`, `value`, `kwargs` — compare against the upstream
`LEDTokenizerFast` before trusting the control flow below.
"""
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = LEDTokenizer
lowerCAmelCase_ = ['input_ids', 'attention_mask']
# Builds the backend tokenizer, then patches its pre_tokenizer and
# post_processor so that `add_prefix_space` / `trim_offsets` match the
# constructor arguments.
def __init__( self : Union[str, Any],__A : List[Any]=None,__A : str=None,__A : str=None,__A : Optional[int]="replace",__A : Union[str, Any]="<s>",__A : Union[str, Any]="</s>",__A : Any="</s>",__A : Optional[int]="<s>",__A : List[str]="<unk>",__A : str="<pad>",__A : Tuple="<mask>",__A : Union[str, Any]=False,__A : Optional[int]=True,**__A : Optional[int],):
super().__init__(
__A,__A,tokenizer_file=__A,errors=__A,bos_token=__A,eos_token=__A,sep_token=__A,cls_token=__A,unk_token=__A,pad_token=__A,mask_token=__A,add_prefix_space=__A,trim_offsets=__A,**__A,)
# Re-create the pre-tokenizer if its serialized add_prefix_space differs.
_lowerCamelCase : List[str] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : str = getattr(__A,pre_tok_state.pop("type" ) )
_lowerCamelCase : List[Any] = add_prefix_space
_lowerCamelCase : Tuple = pre_tok_class(**__A )
_lowerCamelCase : Optional[int] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_lowerCamelCase : List[str] = "post_processor"
_lowerCamelCase : int = getattr(self.backend_tokenizer,__A,__A )
if tokenizer_component_instance:
_lowerCamelCase : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_lowerCamelCase : str = tuple(state["sep"] )
if "cls" in state:
_lowerCamelCase : List[str] = tuple(state["cls"] )
_lowerCamelCase : Dict = False
if state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : List[str] = add_prefix_space
_lowerCamelCase : List[Any] = True
if state.get("trim_offsets",__A ) != trim_offsets:
_lowerCamelCase : List[str] = trim_offsets
_lowerCamelCase : List[str] = True
# Rebuild the post-processor only when something actually changed.
if changes_to_apply:
_lowerCamelCase : Tuple = getattr(__A,state.pop("type" ) )
_lowerCamelCase : Any = component_class(**__A )
setattr(self.backend_tokenizer,__A,__A )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
# mask_token getter: logs an error and returns None when unset.
def lowerCamelCase_ ( self : str ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
# mask_token setter: wraps a plain string in an AddedToken.
def lowerCamelCase_ ( self : List[str],__A : str ):
_lowerCamelCase : Optional[Any] = AddedToken(__A,lstrip=__A,rstrip=__A ) if isinstance(__A,__A ) else value
_lowerCamelCase : str = value
# _batch_encode_plus override: pretokenized input requires add_prefix_space=True.
def lowerCamelCase_ ( self : List[str],*__A : List[Any],**__A : int ):
_lowerCamelCase : List[str] = kwargs.get("is_split_into_words",__A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*__A,**__A )
# _encode_plus override: same add_prefix_space guard for single inputs.
def lowerCamelCase_ ( self : Optional[int],*__A : Optional[Any],**__A : Union[str, Any] ):
_lowerCamelCase : List[Any] = kwargs.get("is_split_into_words",__A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._encode_plus(*__A,**__A )
# Save the backend model's vocabulary files; returns the written paths.
def lowerCamelCase_ ( self : Dict,__A : str,__A : Optional[str] = None ):
_lowerCamelCase : List[str] = self._tokenizer.model.save(__A,name=__A )
return tuple(__A )
# Build model input with special tokens: <s> A </s> ( </s> B </s> ).
def lowerCamelCase_ ( self : List[str],__A : Optional[Any],__A : List[str]=None ):
_lowerCamelCase : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
# Token type ids are all zeros (LED, like BART, does not use token types).
def lowerCamelCase_ ( self : Dict,__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : Tuple = [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
# Padding override: also pads `global_attention_mask` with -1 ("local attention")
# so it stays aligned with the padded input ids.
def lowerCamelCase_ ( self : Any,__A : Union[Dict[str, EncodedInput], BatchEncoding],__A : Optional[int] = None,__A : PaddingStrategy = PaddingStrategy.DO_NOT_PAD,__A : Optional[int] = None,__A : Optional[bool] = None,):
_lowerCamelCase : List[str] = super()._pad(
encoded_inputs=__A,max_length=__A,padding_strategy=__A,pad_to_multiple_of=__A,return_attention_mask=__A,)
# Load from model defaults
if return_attention_mask is None:
_lowerCamelCase : Any = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
_lowerCamelCase : Union[str, Any] = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
_lowerCamelCase : Optional[Any] = len(encoded_inputs["global_attention_mask"] ) != len(__A )
if needs_to_be_padded:
_lowerCamelCase : str = len(__A ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
_lowerCamelCase : Tuple = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
_lowerCamelCase : int = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def SCREAMING_SNAKE_CASE(df, partition_order) -> Any:
    """Collect rows partition-by-partition in the requested order.

    Returns a list of ``(row_id, row_dict)`` tuples where ``row_id`` is
    ``"<partition>_<index-within-partition>"`` (the id scheme used by the
    Spark examples iterable) and ``row_dict`` is the row as a plain dict.

    Fixes the original signature, whose two parameters both were named
    ``_UpperCAmelCase`` (a SyntaxError) while the body used ``df`` and
    ``partition_order``.
    """
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        # Restrict the dataframe to a single physical partition.
        partition = df.where(F"""SPARK_PARTITION_ID() = {part_id}""" ).collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((F"""{part_id}_{row_idx}""", row.asDict()) )
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ) -> List[Any]:
    """Repartitioning a 100-row dataframe at max_shard_size=16 yields 50 shards."""
    session = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
    dataframe = session.range(100 ).repartition(1 )
    builder = Spark(dataframe )
    # Each id becomes a PyArrow int64 (8 bytes), so a 16-byte shard holds 2 rows;
    # 100 rows should therefore be split across 50 partitions.
    builder._repartition_df_if_needed(max_shard_size=16 )
    assert builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ) -> Tuple:
    """_generate_iterable_examples honours an explicit (reversed) partition order."""
    session = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
    dataframe = session.range(10 ).repartition(2 )
    order = [1, 0]
    generate_fn = _generate_iterable_examples(dataframe , order )  # Reverse the partitions.
    expected = _get_expected_row_ids_and_row_dicts_for_partition_order(dataframe , order )
    for position, (row_id, row_dict) in enumerate(generate_fn() ):
        wanted_id, wanted_dict = expected[position]
        assert row_id == wanted_id
        assert row_dict == wanted_dict
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
    """A single-partition dataframe iterates as one shard with ids "0_<i>"."""
    session = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
    dataframe = session.range(10 ).repartition(1 )
    examples = SparkExamplesIterable(dataframe )
    assert examples.n_shards == 1
    for position, (row_id, row_dict) in enumerate(examples ):
        assert row_id == F"""0_{position}"""
        assert row_dict == {"id": position}
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ) -> Any:
    """Shuffling with a mocked RNG that reverses partition order yields the
    examples in reversed-partition order.

    Fixes the original, which bound the shuffle lambda to a throwaway local
    (so the mock's ``shuffle`` was never configured) and whose lambda body
    referenced an undefined name ``x``.
    """
    spark = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
    df = spark.range(30 ).repartition(3 )
    # Mock the generator so that shuffle reverses the partition indices.
    with patch('numpy.random.Generator' ) as generator_mock:
        generator_mock.shuffle = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [2, 1, 0] )
        shuffled_it = SparkExamplesIterable(df ).shuffle_data_sources(generator_mock )
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it ):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
    """shard_data_sources splits 4 partitions across 2 workers as {0, 2} and {1, 3}."""
    session = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
    dataframe = session.range(20 ).repartition(4 )
    # Worker 0 of 2 should see partitions 0 and 2.
    worker_a_examples = SparkExamplesIterable(dataframe ).shard_data_sources(worker_id=0 , num_workers=2 )
    assert worker_a_examples.n_shards == 2
    expected_a = _get_expected_row_ids_and_row_dicts_for_partition_order(dataframe , [0, 2] )
    for position, (row_id, row_dict) in enumerate(worker_a_examples ):
        wanted_id, wanted_dict = expected_a[position]
        assert row_id == wanted_id
        assert row_dict == wanted_dict
    # Worker 1 of 2 should see partitions 1 and 3.
    worker_b_examples = SparkExamplesIterable(dataframe ).shard_data_sources(worker_id=1 , num_workers=2 )
    assert worker_b_examples.n_shards == 2
    expected_b = _get_expected_row_ids_and_row_dicts_for_partition_order(dataframe , [1, 3] )
    for position, (row_id, row_dict) in enumerate(worker_b_examples ):
        wanted_id, wanted_dict = expected_b[position]
        assert row_id == wanted_id
        assert row_dict == wanted_dict
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ) -> Dict:
    """With max_shard_size=1 the partition count is capped at the row count."""
    session = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
    dataframe = session.range(100 ).repartition(1 )
    builder = Spark(dataframe )
    # Ask for the smallest possible shards; a shard can still not hold less
    # than one row, so we end up with exactly one partition per row.
    builder._repartition_df_if_needed(max_shard_size=1 )
    assert builder.df.rdd.getNumPartitions() == 100
| 188 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Import structure consumed by _LazyModule below: maps each submodule name to
# the public symbols it exposes. Optional backends are probed with
# try/except so missing dependencies simply hide the corresponding symbols.
_UpperCAmelCase : Optional[Any] = {
"""configuration_clip""": [
"""CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPConfig""",
"""CLIPOnnxConfig""",
"""CLIPTextConfig""",
"""CLIPVisionConfig""",
],
"""processing_clip""": ["""CLIPProcessor"""],
"""tokenization_clip""": ["""CLIPTokenizer"""],
}
# Fast tokenizer requires the `tokenizers` library.
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : int = ["""CLIPTokenizerFast"""]
# Image processing requires a vision backend (PIL).
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Optional[int] = ["""CLIPFeatureExtractor"""]
_UpperCAmelCase : Union[str, Any] = ["""CLIPImageProcessor"""]
# PyTorch models.
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : int = [
"""CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPModel""",
"""CLIPPreTrainedModel""",
"""CLIPTextModel""",
"""CLIPTextModelWithProjection""",
"""CLIPVisionModel""",
"""CLIPVisionModelWithProjection""",
]
# TensorFlow models.
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Tuple = [
"""TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCLIPModel""",
"""TFCLIPPreTrainedModel""",
"""TFCLIPTextModel""",
"""TFCLIPVisionModel""",
]
# Flax models.
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Optional[int] = [
"""FlaxCLIPModel""",
"""FlaxCLIPPreTrainedModel""",
"""FlaxCLIPTextModel""",
"""FlaxCLIPTextPreTrainedModel""",
"""FlaxCLIPVisionModel""",
"""FlaxCLIPVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
# At runtime (not type-checking), replace this module with a lazy proxy that
# only imports submodules on first attribute access.
_UpperCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 188 | 1 |
def SCREAMING_SNAKE_CASE(a: int, b: int) -> int:
    """Return the greatest common divisor of ``a`` and ``b`` (iterative Euclid).

    Fixes the original signature, whose two parameters shared one name
    (``snake_case_`` twice — a SyntaxError) while the body used ``a``/``b``.
    """
    while a != 0:
        a, b = b % a, a
    return b
def SCREAMING_SNAKE_CASE(a: int, m: int) -> int:
    """Return the modular multiplicative inverse of ``a`` modulo ``m``.

    Uses the iterative extended Euclidean algorithm.

    Raises:
        ValueError: if ``a`` and ``m`` are not coprime (no inverse exists).

    Fixes the original: both parameters shared one name (SyntaxError), the
    guard called an undefined ``gcd``, and the six-way extended-Euclid
    assignment had been collapsed onto a single obfuscated name.
    """
    def _gcd(x: int, y: int) -> int:
        # Local Euclid helper: the module-level gcd above shares this
        # function's (obfuscated) name, so it cannot be called reliably.
        while x != 0:
            x, y = y % x, x
        return y

    if _gcd(a, m) != 1:
        raise ValueError(f'mod inverse of {a!r} and {m!r} does not exist')
    # Invariants: ua*a + ub*m == uc and va*a + vb*m == vc.
    ua, ub, uc = 1, 0, a
    va, vb, vc = 0, 1, m
    while vc != 0:
        q = uc // vc
        va, vb, vc, ua, ub, uc = (ua - q * va), (ub - q * vb), (uc - q * vc), va, vb, vc
    return ua % m
| 297 |
from itertools import count
def UpperCamelCase__ ( _A: int = 50 ):
    """Project Euler 115: smallest row length ``n`` for which the number of
    ways to place red blocks of length >= ``_A`` (separated by at least one
    empty cell, empty row included) first exceeds one million.

    Fixes a NameError in the original: the body referred to
    ``min_block_length`` while the parameter is named ``_A``.
    """
    # fill_count[k] = number of ways to fill a row of length k; rows shorter
    # than the minimum block length admit only the empty placement.
    fill_count = [1] * _A
    for n in count(_A):
        fill_count.append(1)
        for block_length in range(_A, n + 1):
            for block_start in range(n - block_length):
                # Place a block of `block_length` after `block_start` free
                # cells plus one mandatory separator cell.
                fill_count[n] += fill_count[n - block_start - block_length - 1]
            # The block flush against the end of the row.
            fill_count[n] += 1
        if fill_count[n] > 1000000:
            break
    return n
if __name__ == "__main__":
# NOTE(review): `solution` is not defined under that name in this file — the
# solver above is (obfuscated to) `UpperCamelCase__`; verify before running.
print(F"""{solution() = }""")
| 479 | 0 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> List[str]:
    """Prepare a PIL image for the diffusion model.

    Resizes both dimensions down to the nearest multiple of 32, converts to a
    float32 array in [0, 1], reorders to NCHW with a batch dimension, and maps
    to the [-1, 1] range expected by the UNet.

    Fixes the original body, which referred to an undefined name ``image``
    (the parameter is ``__UpperCamelCase``) and to the non-existent
    ``np.floataa`` (should be ``np.float32``).
    """
    w, h = __UpperCamelCase.size
    # Round each dimension down to a multiple of 32.
    w, h = (x - x % 32 for x in (w, h))
    resized = __UpperCamelCase.resize((w, h), resample=PIL_INTERPOLATION['''lanczos'''])
    arr = np.array(resized).astype(np.float32) / 2_5_5.0
    arr = arr[None].transpose(0, 3, 1, 2)  # HWC -> NCHW with leading batch dim
    tensor = torch.from_numpy(arr)
    return 2.0 * tensor - 1.0
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""Latent-diffusion image super-resolution pipeline.

Registers a VQ-VAE (`vqvae`), a UNet (`unet`) and a noise `scheduler`; the
call concatenates noisy latents with the low-resolution image, denoises,
then decodes through the VQ-VAE.

NOTE(review): this file was machine-renamed; several results are bound to
`A__` while later lines read other names — compare with the upstream
diffusers pipeline before trusting the flow below.
"""
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ):
super().__init__()
self.register_modules(vqvae=UpperCamelCase__ , unet=UpperCamelCase__ , scheduler=UpperCamelCase__ )
# Denoising loop: accepts a PIL image or tensor, returns upscaled images.
@torch.no_grad()
def __call__( self , UpperCamelCase__ = None , UpperCamelCase__ = 1 , UpperCamelCase__ = 100 , UpperCamelCase__ = 0.0 , UpperCamelCase__ = None , UpperCamelCase__ = "pil" , UpperCamelCase__ = True , ):
# Determine the batch size from the input type.
if isinstance(UpperCamelCase__ , PIL.Image.Image ):
A__ : str = 1
elif isinstance(UpperCamelCase__ , torch.Tensor ):
A__ : Dict = image.shape[0]
else:
raise ValueError(F"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(UpperCamelCase__ )}" )
if isinstance(UpperCamelCase__ , PIL.Image.Image ):
A__ : Any = preprocess(UpperCamelCase__ )
A__ , A__ : List[Any] = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
A__ : List[Any] = (batch_size, self.unet.config.in_channels // 2, height, width)
A__ : List[str] = next(self.unet.parameters() ).dtype
A__ : str = randn_tensor(UpperCamelCase__ , generator=UpperCamelCase__ , device=self.device , dtype=UpperCamelCase__ )
A__ : Dict = image.to(device=self.device , dtype=UpperCamelCase__ )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(UpperCamelCase__ , device=self.device )
A__ : Union[str, Any] = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
A__ : Optional[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
A__ : Optional[Any] = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
A__ : Dict = {}
if accepts_eta:
A__ : Tuple = eta
for t in self.progress_bar(UpperCamelCase__ ):
# concat latents and low resolution image in the channel dimension.
A__ : List[str] = torch.cat([latents, image] , dim=1 )
A__ : List[Any] = self.scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__ )
# predict the noise residual
A__ : List[str] = self.unet(UpperCamelCase__ , UpperCamelCase__ ).sample
# compute the previous noisy sample x_t -> x_t-1
A__ : Dict = self.scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
# decode the image latents with the VQVAE
A__ : int = self.vqvae.decode(UpperCamelCase__ ).sample
A__ : Union[str, Any] = torch.clamp(UpperCamelCase__ , -1.0 , 1.0 )
# Map from [-1, 1] back to [0, 1] and to channel-last numpy layout.
A__ : Optional[int] = image / 2 + 0.5
A__ : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
A__ : List[str] = self.numpy_to_pil(UpperCamelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase__ )
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# Module-level logger.
_SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
# File names expected inside a tokenizer checkpoint directory.
_SCREAMING_SNAKE_CASE : Optional[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# Hub download URL of the serialized fast tokenizer per pretrained checkpoint.
_SCREAMING_SNAKE_CASE : List[str] = {
'tokenizer_file': {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
},
}
# Maximum supported input length per pretrained checkpoint.
_SCREAMING_SNAKE_CASE : Dict = {
'gpt-neox-20b': 2_0_4_8,
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""Fast (tokenizers-backed) GPT-NeoX tokenizer, byte-level BPE.

NOTE(review): machine-renamed file; some results are bound to `A__` while
later lines read other names — compare against the upstream
GPTNeoXTokenizerFast before trusting the flow.
"""
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = ["input_ids", "attention_mask"]
# Builds the backend tokenizer and re-creates its pre-tokenizer when the
# serialized add_prefix_space differs from the constructor argument.
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__="<|endoftext|>" , UpperCamelCase__="<|endoftext|>" , UpperCamelCase__="<|endoftext|>" , UpperCamelCase__=False , **UpperCamelCase__ , ):
super().__init__(
UpperCamelCase__ , UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , unk_token=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , **UpperCamelCase__ , )
A__ : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , UpperCamelCase__ ) != add_prefix_space:
A__ : Union[str, Any] = getattr(UpperCamelCase__ , pre_tok_state.pop('''type''' ) )
A__ : List[Any] = add_prefix_space
A__ : Any = pre_tok_class(**UpperCamelCase__ )
A__ : List[Any] = add_prefix_space
# Save the backend model's vocabulary files; returns the written paths.
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ = None ):
A__ : Any = self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__ )
return tuple(UpperCamelCase__ )
# Encode a Conversation, appending EOS after each turn and truncating from
# the left to the model's maximum length.
def __snake_case ( self , UpperCamelCase__ ):
A__ : List[str] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) + [self.eos_token_id] )
if len(UpperCamelCase__ ) > self.model_max_length:
A__ : Tuple = input_ids[-self.model_max_length :]
return input_ids
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase ( SCREAMING_SNAKE_CASE ):
"""Unit tests for DEISMultistepScheduler (save/load round-trips, config
sweeps, full denoising loops, fp16).

NOTE(review): machine-renamed file; many intermediates are bound to `_a`
while later lines read other names — compare against the upstream
diffusers test before trusting the flow below.
"""
UpperCAmelCase : List[str] = (DEISMultistepScheduler,)
UpperCAmelCase : Union[str, Any] = (('num_inference_steps', 2_5),)
# Default scheduler config, overridable via kwargs.
def snake_case_ ( self : Dict , **__snake_case : List[Any] ) -> List[Any]:
_a : Optional[int] = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
}
config.update(**__snake_case )
return config
# check_over_configs: save/reload a configured scheduler and verify that
# stepping both (with copied past residuals) produces identical samples.
def snake_case_ ( self : Optional[Any] , __snake_case : Optional[int]=0 , **__snake_case : int ) -> Any:
_a : Any = dict(self.forward_default_kwargs )
_a : Optional[Any] = kwargs.pop('''num_inference_steps''' , __snake_case )
_a : Tuple = self.dummy_sample
_a : Optional[int] = 0.1 * sample
_a : Tuple = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_a : Dict = self.get_scheduler_config(**__snake_case )
_a : Optional[int] = scheduler_class(**__snake_case )
scheduler.set_timesteps(__snake_case )
# copy over dummy past residuals
_a : Optional[int] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__snake_case )
_a : Tuple = scheduler_class.from_pretrained(__snake_case )
new_scheduler.set_timesteps(__snake_case )
# copy over dummy past residuals
_a : List[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
_a , _a : Dict = sample, sample
for t in range(__snake_case , time_step + scheduler.config.solver_order + 1 ):
_a : Tuple = scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
_a : Tuple = new_scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
# Intentionally disabled base-class hook.
def snake_case_ ( self : Optional[int] ) -> Optional[Any]:
pass
# check_over_forward: like check_over_configs but varies forward kwargs.
def snake_case_ ( self : int , __snake_case : List[str]=0 , **__snake_case : Any ) -> Optional[Any]:
_a : Dict = dict(self.forward_default_kwargs )
_a : Dict = kwargs.pop('''num_inference_steps''' , __snake_case )
_a : Union[str, Any] = self.dummy_sample
_a : Any = 0.1 * sample
_a : List[str] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_a : str = self.get_scheduler_config()
_a : List[str] = scheduler_class(**__snake_case )
scheduler.set_timesteps(__snake_case )
# copy over dummy past residuals (must be after setting timesteps)
_a : str = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__snake_case )
_a : List[str] = scheduler_class.from_pretrained(__snake_case )
# copy over dummy past residuals
new_scheduler.set_timesteps(__snake_case )
# copy over dummy past residual (must be after setting timesteps)
_a : Optional[int] = dummy_past_residuals[: new_scheduler.config.solver_order]
_a : Tuple = scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
_a : List[str] = new_scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
# full_loop: run a complete 10-step denoising loop and return the sample.
def snake_case_ ( self : int , __snake_case : int=None , **__snake_case : List[Any] ) -> List[str]:
if scheduler is None:
_a : Dict = self.scheduler_classes[0]
_a : str = self.get_scheduler_config(**__snake_case )
_a : Union[str, Any] = scheduler_class(**__snake_case )
_a : List[str] = self.scheduler_classes[0]
_a : List[str] = self.get_scheduler_config(**__snake_case )
_a : Union[str, Any] = scheduler_class(**__snake_case )
_a : Any = 10
_a : Any = self.dummy_model()
_a : str = self.dummy_sample_deter
scheduler.set_timesteps(__snake_case )
for i, t in enumerate(scheduler.timesteps ):
_a : List[str] = model(__snake_case , __snake_case )
_a : List[Any] = scheduler.step(__snake_case , __snake_case , __snake_case ).prev_sample
return sample
# Two consecutive steps must keep output shapes consistent with the sample.
def snake_case_ ( self : List[Any] ) -> Dict:
_a : Tuple = dict(self.forward_default_kwargs )
_a : Any = kwargs.pop('''num_inference_steps''' , __snake_case )
for scheduler_class in self.scheduler_classes:
_a : Optional[Any] = self.get_scheduler_config()
_a : List[str] = scheduler_class(**__snake_case )
_a : Dict = self.dummy_sample
_a : Any = 0.1 * sample
if num_inference_steps is not None and hasattr(__snake_case , '''set_timesteps''' ):
scheduler.set_timesteps(__snake_case )
elif num_inference_steps is not None and not hasattr(__snake_case , '''set_timesteps''' ):
_a : Tuple = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_a : int = [residual + 0.2, residual + 0.15, residual + 0.10]
_a : str = dummy_past_residuals[: scheduler.config.solver_order]
_a : Any = scheduler.timesteps[5]
_a : int = scheduler.timesteps[6]
_a : List[Any] = scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
_a : Any = scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
# Switching between multistep scheduler classes via from_config must keep
# the full-loop result stable.
def snake_case_ ( self : Any ) -> List[str]:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
_a : Dict = DEISMultistepScheduler(**self.get_scheduler_config() )
_a : Any = self.full_loop(scheduler=__snake_case )
_a : Tuple = torch.mean(torch.abs(__snake_case ) )
assert abs(result_mean.item() - 0.23_916 ) < 1E-3
_a : List[Any] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_a : Tuple = DPMSolverMultistepScheduler.from_config(scheduler.config )
_a : List[str] = UniPCMultistepScheduler.from_config(scheduler.config )
_a : Optional[Any] = DEISMultistepScheduler.from_config(scheduler.config )
_a : Tuple = self.full_loop(scheduler=__snake_case )
_a : int = torch.mean(torch.abs(__snake_case ) )
assert abs(result_mean.item() - 0.23_916 ) < 1E-3
# Sweep num_train_timesteps.
def snake_case_ ( self : Optional[int] ) -> List[Any]:
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=__snake_case )
# Sweep thresholding-related config combinations.
def snake_case_ ( self : int ) -> Optional[int]:
self.check_over_configs(thresholding=__snake_case )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=__snake_case , prediction_type=__snake_case , sample_max_value=__snake_case , algorithm_type='''deis''' , solver_order=__snake_case , solver_type=__snake_case , )
# Sweep prediction types.
def snake_case_ ( self : Union[str, Any] ) -> Union[str, Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__snake_case )
# Sweep solver configurations and confirm full loops stay finite.
def snake_case_ ( self : Optional[Any] ) -> List[Any]:
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=__snake_case , solver_type=__snake_case , prediction_type=__snake_case , algorithm_type=__snake_case , )
_a : Any = self.full_loop(
solver_order=__snake_case , solver_type=__snake_case , prediction_type=__snake_case , algorithm_type=__snake_case , )
assert not torch.isnan(__snake_case ).any(), "Samples have nan numbers"
# lower_order_final toggle.
def snake_case_ ( self : str ) -> Any:
self.check_over_configs(lower_order_final=__snake_case )
self.check_over_configs(lower_order_final=__snake_case )
# Sweep inference-step counts at time_step=0.
def snake_case_ ( self : str ) -> List[str]:
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=__snake_case , time_step=0 )
# Regression value for the default full loop.
def snake_case_ ( self : str ) -> Union[str, Any]:
_a : int = self.full_loop()
_a : Union[str, Any] = torch.mean(torch.abs(__snake_case ) )
assert abs(result_mean.item() - 0.23_916 ) < 1E-3
# Regression value for v-prediction.
def snake_case_ ( self : Optional[int] ) -> Union[str, Any]:
_a : int = self.full_loop(prediction_type='''v_prediction''' )
_a : List[str] = torch.mean(torch.abs(__snake_case ) )
assert abs(result_mean.item() - 0.091 ) < 1E-3
# fp16 full loop must keep the sample in float16.
def snake_case_ ( self : str ) -> Dict:
_a : str = self.scheduler_classes[0]
_a : Dict = self.get_scheduler_config(thresholding=__snake_case , dynamic_thresholding_ratio=0 )
_a : Any = scheduler_class(**__snake_case )
_a : Any = 10
_a : str = self.dummy_model()
_a : Dict = self.dummy_sample_deter.half()
scheduler.set_timesteps(__snake_case )
for i, t in enumerate(scheduler.timesteps ):
_a : Any = model(__snake_case , __snake_case )
_a : Any = scheduler.step(__snake_case , __snake_case , __snake_case ).prev_sample
assert sample.dtype == torch.floataa
| 471 |
import random
def lowerCamelCase_(num):
    """Miller-Rabin probabilistic primality test with 5 random rounds.

    Always returns True for a prime; returns False for a composite except
    with very small probability (at most 4**-5 per call).

    Fixes the original, whose body used ``num`` while the parameter was
    named ``UpperCamelCase_``.
    """
    # Write num - 1 as s * 2**t with s odd.
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)  # modular exponentiation: a**s mod num
        if v != 1:
            i = 0
            # Square repeatedly; a prime must reach num - 1 before t squarings.
            while v != (num - 1):
                if i == t - 1:
                    return False
                i = i + 1
                v = (v * v) % num
    return True
def lowerCamelCase_(num):
    """Primality check: trial division by all primes below 1000, then a
    5-round Miller-Rabin test for survivors.

    Fixes the original, which referenced an undefined name ``rabin_miller``
    (the Miller-Rabin helper in this obfuscated file shares this function's
    own name, so it cannot be called) and whose body used ``num`` while the
    parameter was named ``UpperCamelCase_``. The hard-coded prime list is
    replaced by an equivalent computed list of the primes 2..997.
    """
    if num < 2:
        return False

    def _rabin_miller(n):
        # 5-round Miller-Rabin; deterministic True for primes.
        s, t = n - 1, 0
        while s % 2 == 0:
            s //= 2
            t += 1
        for _ in range(5):
            a = random.randrange(2, n - 1)
            v = pow(a, s, n)
            if v != 1:
                i = 0
                while v != n - 1:
                    if i == t - 1:
                        return False
                    i += 1
                    v = (v * v) % n
        return True

    # All primes below 1000 (identical to the original literal list 2..997).
    low_primes = [2] + [p for p in range(3, 1000, 2)
                        if all(p % d for d in range(3, int(p ** 0.5) + 1, 2))]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return _rabin_miller(num)
def lowerCamelCase_ ( UpperCamelCase_ = 1024 ):
    """Return a random prime with ``UpperCamelCase_`` (the key size) bits.

    Draws uniformly from [2**(keysize-1), 2**keysize) until a probable prime
    is found.

    NOTE(review): reconstructed from an obfuscated body that read ``keysize``
    and ``num`` without binding them, and that mistakenly primality-tested the
    keysize parameter instead of the drawn candidate.  ``is_prime_low_num`` is
    not defined under that name in this file (siblings were obfuscated to
    ``lowerCamelCase_``), so as the file stands this still raises NameError.
    """
    keysize = UpperCamelCase_
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num
# NOTE(review): broken driver — `generate_large_prime` and `is_prime_low_num` do
# not exist under those names in this file (the functions above were obfuscated
# to `lowerCamelCase_`), the result is bound to `__UpperCAmelCase` while the
# prints read `num`, and each print emits a single tuple rather than separate
# arguments.  As written this raises NameError when run as a script.
if __name__ == "__main__":
    __UpperCAmelCase : Union[str, Any] = generate_large_prime()
    print(('Prime number:', num))
    print(('is_prime_low_num:', is_prime_low_num(num)))
| 471 | 1 |
"""simple docstring"""
__UpperCAmelCase = 'Tobias Carryer'
from time import time
class _SCREAMING_SNAKE_CASE:
    """Linear congruential generator: seed -> (multiplier * seed + increment) % modulo.

    NOTE(review): reconstructed from an obfuscated block whose ``__init__``
    declared the same parameter name (``__A``) four times (a SyntaxError) while
    the body read ``multiplier``/``increment``/``modulo``/``seed``; the step
    method also bound its result to a throwaway name instead of ``self.seed``.
    """

    def __init__(self, multiplier, increment, modulo, seed=None) -> None:
        """Store the LCG parameters and the starting seed.

        BUGFIX (B008): the original default ``seed=int(time())`` was evaluated
        once at class-definition time, so every default-seeded instance shared
        one seed.  The current time is now taken at call time instead.
        """
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = int(time()) if seed is None else seed

    def __lowerCAmelCase(self) -> int:
        """Advance the generator one step and return the new seed.

        The name keeps the original (name-mangled) identifier so external
        callers using ``_SCREAMING_SNAKE_CASE__lowerCAmelCase`` keep working.
        """
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed
# NOTE(review): broken demo — the class defined above is (obfuscated)
# `_SCREAMING_SNAKE_CASE`, so `LinearCongruentialGenerator` is undefined; the
# instance is bound to `__UpperCAmelCase` while the loop reads `lcg`; and
# `next_number` does not exist (the step method was obfuscated to a
# name-mangled `__lowerCAmelCase`).  As written this raises NameError.
if __name__ == "__main__":
    # Show the LCG in action.
    __UpperCAmelCase = LinearCongruentialGenerator(1_66_45_25, 10_13_90_42_23, 2 << 31)
    while True:
        print(lcg.next_number())
| 707 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__UpperCAmelCase = logging.get_logger(__name__)
def _snake_case ( lowercase__ ) -> List[List[ImageInput]]:
    """Normalize the input into a batch of videos (a list of lists of frames).

    Accepts a batch of videos, a single video (list of frames), or a single
    frame, and always returns the doubly-nested ``[[frame, ...], ...]`` shape.

    Raises:
        ValueError: if the input matches none of the accepted shapes.

    NOTE(review): the original body read ``videos`` while the parameter was
    obfuscated to ``lowercase__`` (NameError as written); the alias below keeps
    the public signature intact.
    """
    videos = lowercase__
    # BUGFIX: guard against empty lists before indexing `videos[0]` /
    # `videos[0][0]` (the original raised IndexError instead of ValueError).
    if (
        isinstance(videos, (list, tuple))
        and videos
        and isinstance(videos[0], (list, tuple))
        and videos[0]
        and is_valid_image(videos[0][0])
    ):
        return videos
    elif isinstance(videos, (list, tuple)) and videos and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"""Could not make batched video from {videos}""" )
class _SCREAMING_SNAKE_CASE(BaseImageProcessor):
    r"""
    Video image processor: each frame is optionally resized, center-cropped,
    rescaled and normalized, and the frames are returned as a ``BatchFeature``
    under the ``pixel_values`` key.

    NOTE(review): reconstructed from an obfuscated block that used duplicate
    ``__A`` parameter names (a SyntaxError), an undefined base class ``A__``
    (the imports show ``BaseImageProcessor`` is the intended base), and six
    methods all named ``__lowerCAmelCase``.  Parameter and method names were
    recovered from the attribute reads in the bodies and the
    ``BaseImageProcessor`` API; defaults are the original literals.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Defaults: shortest edge 224 for resizing, 224x224 for the center crop.
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize one frame to ``size`` (shortest-edge spec or explicit height/width)."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            # Keep the aspect ratio: scale so the shortest edge matches.
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
        # `resize` here resolves to the module-level transform, not this method.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop one frame to ``size['height']`` x ``size['width']``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" )
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize one frame with the given per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        data_format=ChannelDimension.FIRST,
    ) -> np.ndarray:
        """Apply the configured transform pipeline to a single frame."""
        # BUGFIX: the original condition `do_resize and size is None or resample is None`
        # parsed as `(do_resize and size is None) or (resample is None)` and so
        # raised even when do_resize was False; parenthesized as intended.
        if do_resize and (size is None or resample is None):
            raise ValueError("""Size and resample must be specified if do_resize is True.""" )
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Preprocess a video (or batch of videos) into model-ready pixel values.

        Any argument left as None falls back to the value configured at
        construction time.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        if not valid_images(videos):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        # `_snake_case` is this module's (obfuscated-named) make-batched helper.
        videos = _snake_case(videos)
        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 256 | 0 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
# NOTE(review): heavily obfuscated HF-style "model tester" for Nystromformer.
# The ``__init__`` and every ``create_and_check_*`` method declare the SAME
# parameter name (``A__``) multiple times — a SyntaxError — and every result is
# bound to ``snake_case`` while later lines read the intended descriptive names
# (``config``, ``input_ids``, ``model``, ``result``, ...).  The code is kept
# byte-identical and only annotated; a confident reconstruction would need the
# upstream transformers test file.
class _lowercase :
    # Builds dummy configs and inputs; the positional defaults line up with the
    # attribute reads below (parent, batch_size=13, seq_length=7, ... scope=None).
    def __init__( self , A__ , A__=13 , A__=7 , A__=True , A__=True , A__=True , A__=True , A__=99 , A__=32 , A__=5 , A__=4 , A__=37 , A__="gelu" , A__=0.1 , A__=0.1 , A__=5_12 , A__=16 , A__=2 , A__=0.0_2 , A__=3 , A__=4 , A__=None , ) -> int:
        snake_case = parent
        snake_case = batch_size
        snake_case = seq_length
        snake_case = is_training
        snake_case = use_input_mask
        snake_case = use_token_type_ids
        snake_case = use_labels
        snake_case = vocab_size
        snake_case = hidden_size
        snake_case = num_hidden_layers
        snake_case = num_attention_heads
        snake_case = intermediate_size
        snake_case = hidden_act
        snake_case = hidden_dropout_prob
        snake_case = attention_probs_dropout_prob
        snake_case = max_position_embeddings
        snake_case = type_vocab_size
        snake_case = type_sequence_label_size
        snake_case = initializer_range
        snake_case = num_labels
        snake_case = num_choices
        snake_case = scope
    # presumably prepare_config_and_inputs: random ids/masks/labels + a config
    def UpperCamelCase ( self ) -> Tuple:
        snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        snake_case = None
        if self.use_input_mask:
            snake_case = random_attention_mask([self.batch_size, self.seq_length] )
        snake_case = None
        if self.use_token_type_ids:
            snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        snake_case = None
        snake_case = None
        snake_case = None
        if self.use_labels:
            snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            snake_case = ids_tensor([self.batch_size] , self.num_choices )
        snake_case = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    # presumably get_config
    def UpperCamelCase ( self ) -> Optional[int]:
        return NystromformerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A__ , initializer_range=self.initializer_range , )
    # presumably create_and_check_model
    def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Tuple:
        snake_case = NystromformerModel(config=A__ )
        model.to(A__ )
        model.eval()
        snake_case = model(A__ , attention_mask=A__ , token_type_ids=A__ )
        snake_case = model(A__ , token_type_ids=A__ )
        snake_case = model(A__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    # presumably create_and_check_for_masked_lm
    def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Union[str, Any]:
        snake_case = NystromformerForMaskedLM(config=A__ )
        model.to(A__ )
        model.eval()
        snake_case = model(A__ , attention_mask=A__ , token_type_ids=A__ , labels=A__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    # presumably create_and_check_for_question_answering
    def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Optional[Any]:
        snake_case = NystromformerForQuestionAnswering(config=A__ )
        model.to(A__ )
        model.eval()
        snake_case = model(
            A__ , attention_mask=A__ , token_type_ids=A__ , start_positions=A__ , end_positions=A__ , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    # presumably create_and_check_for_sequence_classification
    def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> List[str]:
        snake_case = self.num_labels
        snake_case = NystromformerForSequenceClassification(A__ )
        model.to(A__ )
        model.eval()
        snake_case = model(A__ , attention_mask=A__ , token_type_ids=A__ , labels=A__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    # presumably create_and_check_for_token_classification
    def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Optional[Any]:
        snake_case = self.num_labels
        snake_case = NystromformerForTokenClassification(config=A__ )
        model.to(A__ )
        model.eval()
        snake_case = model(A__ , attention_mask=A__ , token_type_ids=A__ , labels=A__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    # presumably create_and_check_for_multiple_choice
    def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> List[str]:
        snake_case = self.num_choices
        snake_case = NystromformerForMultipleChoice(config=A__ )
        model.to(A__ )
        model.eval()
        snake_case = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        snake_case = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        snake_case = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        snake_case = model(
            A__ , attention_mask=A__ , token_type_ids=A__ , labels=A__ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    # presumably prepare_config_and_inputs_for_common
    def UpperCamelCase ( self ) -> List[Any]:
        snake_case = self.prepare_config_and_inputs()
        (
            (
                snake_case
            ) , (
                snake_case
            ) , (
                snake_case
            ) , (
                snake_case
            ) , (
                snake_case
            ) , (
                snake_case
            ) , (
                snake_case
            ) ,
        ) = config_and_inputs
        snake_case = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
# NOTE(review): obfuscated common-test class.  The bases were obfuscated to
# ``__a`` (undefined; presumably ModelTesterMixin and PipelineTesterMixin per
# the imports), all four class attributes collapsed to ``_UpperCAmelCase`` so
# only the last assignment survives, every method is named ``UpperCamelCase``
# (later defs shadow earlier ones), results are bound to ``snake_case`` while
# later lines read ``config_and_inputs``, and ``NystromformerModelTester`` /
# ``A__`` are undefined here.  Kept byte-identical; annotated only.
@require_torch
class _lowercase ( __a , __a , unittest.TestCase ):
    # presumably all_model_classes
    _UpperCAmelCase = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    # presumably pipeline_model_mapping
    _UpperCAmelCase = (
        {
            '''feature-extraction''': NystromformerModel,
            '''fill-mask''': NystromformerForMaskedLM,
            '''question-answering''': NystromformerForQuestionAnswering,
            '''text-classification''': NystromformerForSequenceClassification,
            '''token-classification''': NystromformerForTokenClassification,
            '''zero-shot''': NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # presumably test_pruning / test_headmasking flags
    _UpperCAmelCase = False
    _UpperCAmelCase = False
    # presumably setUp
    def UpperCamelCase ( self ) -> List[Any]:
        snake_case = NystromformerModelTester(self )
        snake_case = ConfigTester(self , config_class=A__ , hidden_size=37 )
    # presumably test_config
    def UpperCamelCase ( self ) -> Optional[int]:
        self.config_tester.run_common_tests()
    # presumably test_model
    def UpperCamelCase ( self ) -> Optional[int]:
        snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A__ )
    # presumably test_model_various_embeddings
    def UpperCamelCase ( self ) -> int:
        snake_case = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            snake_case = type
            self.model_tester.create_and_check_model(*A__ )
    # presumably test_for_masked_lm
    def UpperCamelCase ( self ) -> Optional[Any]:
        snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*A__ )
    # presumably test_for_multiple_choice
    def UpperCamelCase ( self ) -> Union[str, Any]:
        snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*A__ )
    # presumably test_for_question_answering
    def UpperCamelCase ( self ) -> Dict:
        snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*A__ )
    # presumably test_for_sequence_classification
    def UpperCamelCase ( self ) -> Optional[int]:
        snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*A__ )
    # presumably test_for_token_classification
    def UpperCamelCase ( self ) -> Union[str, Any]:
        snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*A__ )
    # presumably test_model_from_pretrained
    @slow
    def UpperCamelCase ( self ) -> Tuple:
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            snake_case = NystromformerModel.from_pretrained(A__ )
            self.assertIsNotNone(A__ )
# NOTE(review): obfuscated slow integration tests against the hosted
# ``uw-madison/nystromformer-512`` checkpoint.  Results are bound to
# ``snake_case`` while later lines read ``model``/``output``/``tokenizer``/
# ``token_logits``, and ``A__`` is undefined — NameError as written.
@require_torch
class _lowercase ( unittest.TestCase ):
    # presumably test_inference_no_head: checks hidden-state shape and a 3x3 slice
    @slow
    def UpperCamelCase ( self ) -> Union[str, Any]:
        snake_case = NystromformerModel.from_pretrained('''uw-madison/nystromformer-512''' )
        snake_case = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        with torch.no_grad():
            snake_case = model(A__ )[0]
        snake_case = torch.Size((1, 6, 7_68) )
        self.assertEqual(output.shape , A__ )
        snake_case = torch.tensor(
            [[[-0.4_5_3_2, -0.0_9_3_6, 0.5_1_3_7], [-0.2_6_7_6, 0.0_6_2_8, 0.6_1_8_6], [-0.3_6_2_9, -0.1_7_2_6, 0.4_7_1_6]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , A__ , atol=1e-4 ) )
    # presumably test_masked_lm_end_to_end: fills the [MASK] with "capital"
    @slow
    def UpperCamelCase ( self ) -> List[str]:
        snake_case = '''the [MASK] of Belgium is Brussels'''
        snake_case = AutoTokenizer.from_pretrained('''uw-madison/nystromformer-512''' )
        snake_case = NystromformerForMaskedLM.from_pretrained('''uw-madison/nystromformer-512''' )
        snake_case = tokenizer(A__ , return_tensors='''pt''' )
        with torch.no_grad():
            snake_case = model(encoding.input_ids ).logits
        snake_case = token_logits[:, 2, :].argmax(-1 )[0]
        self.assertEqual(tokenizer.decode(A__ ) , '''capital''' )
| 342 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
def __UpperCamelCase ( voltage : float , current : float , power : float ) -> tuple:
    """Solve one unknown of the electrical power relation P = V * I.

    Exactly one of the three arguments must be 0; that quantity is computed
    from the other two.

    Returns:
        A ``result(name, value)`` namedtuple naming the solved quantity.

    Raises:
        ValueError: if zero or more than one argument is 0, or if power < 0.

    NOTE(review): reconstructed from an obfuscated signature that declared the
    same parameter name (``a``) three times (a SyntaxError) while the body read
    ``voltage``/``current``/``power``; the namedtuple was also bound to a
    throwaway name while the body read ``result``.
    """
    result = namedtuple('result' , 'name value' )
    if (voltage, current, power).count(0 ) != 1:
        raise ValueError('Only one argument must be 0' )
    elif power < 0:
        raise ValueError(
            'Power cannot be negative in any electrical/electronics system' )
    elif voltage == 0:
        return result('voltage' , power / current )
    elif current == 0:
        return result('current' , power / voltage )
    elif power == 0:
        return result('power' , float(round(abs(voltage * current ) , 2 ) ) )
    else:
        raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
    # Run any doctests embedded in this module when executed as a script.
    import doctest
    doctest.testmod()
| 342 | 1 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
# NOTE(review): the three module constants here were all obfuscated to one name
# (`UpperCamelCase_`), so the last assignment clobbered the first two and the
# tests below that reference EN_CODE / RO_CODE raised NameError.  Names are
# restored from how the values are used later in this file (the vocab-fixture
# name follows HF test convention — confirm against the upstream test file).
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')

if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

# fairseq language-code token ids used by the MBart integration tests
EN_CODE = 25_00_04  # en_XX
RO_CODE = 25_00_20  # ro_RO
# NOTE(review): obfuscated MBart tokenizer test class.  The mixin base was
# obfuscated to ``a`` (undefined; presumably TokenizerTesterMixin), all four
# class attributes collapsed to the name-mangled ``__UpperCamelCase`` so only
# the last assignment survives, results are bound to ``UpperCAmelCase__`` while
# later lines read the intended names, and ``A_`` is undefined throughout —
# NameError as written.  Kept byte-identical; annotated only.
@require_sentencepiece
@require_tokenizers
class snake_case_ ( a, unittest.TestCase ):
    '''Slow/fast MBart tokenizer unit tests (SentencePiece fixture based).'''
    # presumably tokenizer_class / rust_tokenizer_class / test flags
    __UpperCamelCase = MBartTokenizer
    __UpperCamelCase = MBartTokenizerFast
    __UpperCamelCase = True
    __UpperCamelCase = True
    # presumably setUp: build a tokenizer from the SentencePiece fixture
    def __UpperCAmelCase ( self ) -> List[Any]:
        super().setUp()
        # We have a SentencePiece fixture for testing
        UpperCAmelCase__ =MBartTokenizer(A_, keep_accents=A_ )
        tokenizer.save_pretrained(self.tmpdirname )
    # presumably test_full_tokenizer: tokenize/convert round-trips
    def __UpperCAmelCase ( self ) -> Dict:
        UpperCAmelCase__ =MBartTokenizer(A_, keep_accents=A_ )
        UpperCAmelCase__ =tokenizer.tokenize("This is a test" )
        self.assertListEqual(A_, ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(A_ ), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
        UpperCAmelCase__ =tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            A_, [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ], )
        UpperCAmelCase__ =tokenizer.convert_tokens_to_ids(A_ )
        self.assertListEqual(
            A_, [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
            ], )
        UpperCAmelCase__ =tokenizer.convert_ids_to_tokens(A_ )
        self.assertListEqual(
            A_, [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ], )
    # presumably test_save_pretrained: slow/fast save round-trips in three formats
    def __UpperCAmelCase ( self ) -> Union[str, Any]:
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        UpperCAmelCase__ =(self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                UpperCAmelCase__ =self.rust_tokenizer_class.from_pretrained(A_, **A_ )
                UpperCAmelCase__ =self.tokenizer_class.from_pretrained(A_, **A_ )
                UpperCAmelCase__ =tempfile.mkdtemp()
                UpperCAmelCase__ =tokenizer_r.save_pretrained(A_ )
                UpperCAmelCase__ =tokenizer_p.save_pretrained(A_ )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
                UpperCAmelCase__ =tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
                self.assertSequenceEqual(A_, A_ )
                # Checks everything loads correctly in the same way
                UpperCAmelCase__ =tokenizer_r.from_pretrained(A_ )
                UpperCAmelCase__ =tokenizer_p.from_pretrained(A_ )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(A_, A_ ) )
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(A_ )
                # Save tokenizer rust, legacy_format=True
                UpperCAmelCase__ =tempfile.mkdtemp()
                UpperCAmelCase__ =tokenizer_r.save_pretrained(A_, legacy_format=A_ )
                UpperCAmelCase__ =tokenizer_p.save_pretrained(A_ )
                # Checks it save with the same files
                self.assertSequenceEqual(A_, A_ )
                # Checks everything loads correctly in the same way
                UpperCAmelCase__ =tokenizer_r.from_pretrained(A_ )
                UpperCAmelCase__ =tokenizer_p.from_pretrained(A_ )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(A_, A_ ) )
                shutil.rmtree(A_ )
                # Save tokenizer rust, legacy_format=False
                UpperCAmelCase__ =tempfile.mkdtemp()
                UpperCAmelCase__ =tokenizer_r.save_pretrained(A_, legacy_format=A_ )
                UpperCAmelCase__ =tokenizer_p.save_pretrained(A_ )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                UpperCAmelCase__ =tokenizer_r.from_pretrained(A_ )
                UpperCAmelCase__ =tokenizer_p.from_pretrained(A_ )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(A_, A_ ) )
                shutil.rmtree(A_ )
# NOTE(review): obfuscated en_XX->ro_RO MBart translation integration tests
# against the ``facebook/mbart-large-en-ro`` checkpoint.  The four class
# attributes collapsed to the name-mangled ``__UpperCamelCase`` (only the last
# survives; presumably checkpoint_name / src_text / tgt_text /
# expected_src_tokens), results are bound to ``UpperCAmelCase__`` while later
# lines read the intended names, and ``A_`` is undefined — NameError as written.
@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case_ ( unittest.TestCase ):
    '''Slow integration tests for MBart en_XX->ro_RO tokenization.'''
    __UpperCamelCase = 'facebook/mbart-large-en-ro'
    __UpperCamelCase = [
        ' UN Chief Says There Is No Military Solution in Syria',
        ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
    ]
    __UpperCamelCase = [
        'Şeful ONU declară că nu există o soluţie militară în Siria',
        'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
    ]
    __UpperCamelCase = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE]
    # presumably setUpClass: load the tokenizer once for the whole class
    @classmethod
    def __UpperCAmelCase ( cls ) -> int:
        UpperCAmelCase__ =MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO" )
        UpperCAmelCase__ =1
        return cls
    # language-code ids in the fairseq vocabulary
    def __UpperCAmelCase ( self ) -> Union[str, Any]:
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 25_0001 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 25_0004 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 25_0020 )
    # encoding the source text yields the expected token ids
    def __UpperCAmelCase ( self ) -> Dict:
        UpperCAmelCase__ =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, A_ )
    # decoding skips special tokens and drops the language code
    def __UpperCAmelCase ( self ) -> Optional[Any]:
        self.assertIn(A_, self.tokenizer.all_special_ids )
        UpperCAmelCase__ =[RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2]
        UpperCAmelCase__ =self.tokenizer.decode(A_, skip_special_tokens=A_ )
        UpperCAmelCase__ =self.tokenizer.decode(generated_ids[1:], skip_special_tokens=A_ )
        self.assertEqual(A_, A_ )
        self.assertNotIn(self.tokenizer.eos_token, A_ )
    # truncation keeps EOS + language code at the end
    def __UpperCAmelCase ( self ) -> str:
        UpperCAmelCase__ =["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], A_ )
        UpperCAmelCase__ =10
        UpperCAmelCase__ =self.tokenizer(A_, max_length=A_, truncation=A_ ).input_ids[0]
        self.assertEqual(ids[-2], 2 )
        self.assertEqual(ids[-1], A_ )
        self.assertEqual(len(A_ ), A_ )
    # mask token / language code id lookup
    def __UpperCAmelCase ( self ) -> str:
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ), [25_0026, 25_0001] )
    # save/reload preserves the fairseq vocab mapping
    def __UpperCAmelCase ( self ) -> Any:
        UpperCAmelCase__ =tempfile.mkdtemp()
        UpperCAmelCase__ =self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(A_ )
        UpperCAmelCase__ =MBartTokenizer.from_pretrained(A_ )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, A_ )
    # batch shapes/markers match the fairseq reference
    @require_torch
    def __UpperCAmelCase ( self ) -> Optional[Any]:
        UpperCAmelCase__ =self.tokenizer(self.src_text, text_target=self.tgt_text, padding=A_, return_tensors="pt" )
        UpperCAmelCase__ =shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id )
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
    # full padded/truncated batch round-trip
    @require_torch
    def __UpperCAmelCase ( self ) -> Dict:
        UpperCAmelCase__ =self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=A_, truncation=A_, max_length=len(self.expected_src_tokens ), return_tensors="pt", )
        UpperCAmelCase__ =shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id )
        self.assertIsInstance(A_, A_ )
        self.assertEqual((2, 14), batch.input_ids.shape )
        self.assertEqual((2, 14), batch.attention_mask.shape )
        UpperCAmelCase__ =batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, A_ )
        self.assertEqual(2, batch.decoder_input_ids[0, -1] ) # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [] )
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE] )
    # independent max_length for source vs target
    def __UpperCAmelCase ( self ) -> Any:
        UpperCAmelCase__ =self.tokenizer(self.src_text, padding=A_, truncation=A_, max_length=3, return_tensors="pt" )
        UpperCAmelCase__ =self.tokenizer(
            text_target=self.tgt_text, padding=A_, truncation=A_, max_length=10, return_tensors="pt" )
        UpperCAmelCase__ =targets["input_ids"]
        UpperCAmelCase__ =shift_tokens_right(A_, self.tokenizer.pad_token_id )
        self.assertEqual(batch.input_ids.shape[1], 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1], 10 )
    # translation-inputs helper forces the target-language BOS token
    @require_torch
    def __UpperCAmelCase ( self ) -> Any:
        UpperCAmelCase__ =self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR" )
        self.assertEqual(
            nested_simplify(A_ ), {
                # A, test, EOS, en_XX
                "input_ids": [[62, 3034, 2, 25_0004]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 25_0001,
            }, )
| 700 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): the logger and the archive map were both obfuscated to one name
# (`UpperCamelCase_`), so the logger was clobbered and the config classes below
# that call `logger.warning` / `logger.info` raised NameError.  Names restored
# from usage (the map name follows the transformers convention — confirm
# against the upstream config module).
logger = logging.get_logger(__name__)

# Map of canonical BLIP checkpoint names to their hosted config files.
BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
    'Salesforce/blip-vqa-capfit-large': (
        'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'
    ),
    'Salesforce/blip-image-captioning-base': (
        'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
    ),
    'Salesforce/blip-image-captioning-large': (
        'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
    ),
    'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
    'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
    'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
    'Salesforce/blip-itm-large-flikr': (
        'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
    ),
}
class snake_case_(PretrainedConfig):
    """Configuration for the BLIP text model.

    NOTE(review): reconstructed from an obfuscated block whose ``__init__`` used
    the same parameter name (``A_``) for every argument (a SyntaxError) and
    whose base class was the undefined name ``a`` (the imports show
    ``PretrainedConfig`` is the intended base).  Parameter names and order were
    recovered from the attribute assignments and the ``super().__init__`` call;
    defaults are the original literals.
    """

    model_type = 'blip_text_model'

    def __init__(
        self,
        vocab_size=3_0524,
        hidden_size=768,
        encoder_hidden_size=768,
        intermediate_size=3072,
        projection_dim=768,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=512,
        hidden_act="gelu",
        layer_norm_eps=1e-12,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        bos_token_id=3_0522,
        eos_token_id=2,
        pad_token_id=0,
        sep_token_id=102,
        is_decoder=True,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_hidden_size = encoder_hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.hidden_dropout_prob = hidden_dropout_prob
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.is_decoder = is_decoder
        self.use_cache = use_cache

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load this config, extracting the text sub-config from composite BLIP checkpoints."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from a composite BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict, **kwargs)
class snake_case_(PretrainedConfig):
    """Configuration for the BLIP vision model.

    NOTE(review): reconstructed from an obfuscated block with duplicate ``A_``
    parameter names (a SyntaxError) and the undefined base ``a`` (intended:
    ``PretrainedConfig``).  Parameter names/order were recovered from the
    attribute assignments; defaults are the original literals.
    """

    model_type = 'blip_vision_model'

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        image_size=384,
        patch_size=16,
        hidden_act="gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=1e-10,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load this config, extracting the vision sub-config from composite BLIP checkpoints."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from a composite BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict, **kwargs)
class snake_case_ ( a ):
'''simple docstring'''
__UpperCamelCase = 'blip'
__UpperCamelCase = True
def __init__( self, A_=None, A_=None, A_=512, A_=2.65_92, A_=256, **A_, ) -> str:
super().__init__(**A_ )
if text_config is None:
UpperCAmelCase__ ={}
logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values." )
if vision_config is None:
UpperCAmelCase__ ={}
logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values." )
UpperCAmelCase__ =BlipTextConfig(**A_ )
UpperCAmelCase__ =BlipVisionConfig(**A_ )
UpperCAmelCase__ =self.vision_config.hidden_size
UpperCAmelCase__ =projection_dim
UpperCAmelCase__ =logit_scale_init_value
UpperCAmelCase__ =1.0
UpperCAmelCase__ =0.02
UpperCAmelCase__ =image_text_hidden_size
@classmethod
def __UpperCAmelCase ( cls, A_, A_, **A_ ) -> Tuple:
return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **A_ )
def __UpperCAmelCase ( self ) -> int:
UpperCAmelCase__ =copy.deepcopy(self.__dict__ )
UpperCAmelCase__ =self.text_config.to_dict()
UpperCAmelCase__ =self.vision_config.to_dict()
UpperCAmelCase__ =self.__class__.model_type
return output
| 510 | 0 |
"""simple docstring"""
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class UpperCAmelCase_ ( AbstractDatasetReader ):
    """Reader that builds a map-style or streaming dataset from JSON/JSON-Lines files.

    The obfuscated original declared the same parameter name several times (a
    SyntaxError) and inherited from the undefined name `snake_case`; distinct
    parameter names and the imported `AbstractDatasetReader` base are restored.
    """

    def __init__(
        self,
        path_or_paths,
        split=None,
        features=None,
        cache_dir=None,
        keep_in_memory=False,
        streaming=False,
        field=None,
        num_proc=None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        # `field` selects a nested key of each JSON document to read the table from.
        self.field = field
        # Normalize to the {split_name: paths} mapping the builder expects.
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def _lowerCamelCase(self):
        """Materialize and return the dataset (streaming or map-style)."""
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
class UpperCAmelCase_ :
    """Writer that serializes a dataset to JSON/JSON-Lines, optionally in parallel.

    The obfuscated original repeated parameter names (a SyntaxError) and gave all
    three methods the same name (`_lowerCamelCase`), while the public entry point
    called `self._batch_json` and `self._write`; the private method names are
    restored from those call sites.
    """

    def __init__(self, dataset, path_or_buf, batch_size=None, num_proc=None, **to_json_kwargs):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""")
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def _lowerCamelCase(self):
        """Serialize the dataset to `self.path_or_buf`; return the number of bytes written."""
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        # JSON-Lines is the natural default for the "records" orientation only.
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"""`datasets` currently does not support {compression} compression""")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"""The compression parameter is not supported when writing to a buffer, but compression={compression}"""
                    " was passed. Please provide a local path instead.")
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        return written

    def _batch_json(self, args):
        """Serialize one batch of rows (selected by offset) to encoded JSON bytes."""
        offset, orient, lines, index, to_json_kwargs = args
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs)
        # Guarantee one record block per line for downstream line-oriented readers.
        if json_str.endswith("\n") is False:
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(self, file_obj, orient, lines, index, **to_json_kwargs):
        """Stream all batches into `file_obj`, serially or through a process pool."""
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)
        return written
| 76 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module-level logger; the tokenizer's save_vocabulary() below references it as `logger`
# (the obfuscated name `a_` left that reference undefined).
logger = logging.get_logger(__name__)
# Resource names of the two on-disk vocabulary artifacts of the CTRL tokenizer.
# (The obfuscated source bound all five constants to the same name `a_`, so the
# names the tokenizer class actually references were undefined.)
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}

# Download locations of the pretrained "ctrl" checkpoint's vocab/merges files.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
    "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}

# Maximum input length (in tokens) supported by the pretrained positional embeddings.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ctrl": 256,
}

# CTRL control codes: human-readable domain name -> vocabulary id.
CONTROL_CODES = {
    "Pregnancy": 168629,
    "Christianity": 7675,
    "Explain": 106423,
    "Fitness": 63440,
    "Saving": 63163,
    "Ask": 27171,
    "Ass": 95985,
    "Joke": 163509,
    "Questions": 45622,
    "Thoughts": 49605,
    "Retail": 52342,
    "Feminism": 164338,
    "Writing": 11992,
    "Atheism": 192263,
    "Netflix": 48616,
    "Computing": 39639,
    "Opinion": 43213,
    "Alone": 44967,
    "Funny": 58917,
    "Gaming": 40358,
    "Human": 4088,
    "India": 1331,
    "Joker": 77138,
    "Diet": 36206,
    "Legal": 11859,
    "Norman": 4939,
    "Tip": 72689,
    "Weight": 52343,
    "Movies": 46273,
    "Running": 23425,
    "Science": 2090,
    "Horror": 37793,
    "Confession": 60572,
    "Finance": 12250,
    "Politics": 16360,
    "Scary": 191985,
    "Support": 12654,
    "Technologies": 32516,
    "Teenage": 66160,
    "Event": 32769,
    "Learned": 67460,
    "Notion": 182770,
    "Wikipedia": 37583,
    "Books": 6665,
    "Extract": 76050,
    "Confessions": 102701,
    "Conspiracy": 75932,
    "Links": 63674,
    "Narcissus": 150425,
    "Relationship": 54766,
    "Relationships": 134796,
    "Reviews": 41671,
    "News": 4256,
    "Translation": 26820,
    "multilingual": 128406,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs occurring in `word`.

    `word` is a sequence of symbols (variable-length strings). Renamed from the
    obfuscated `__UpperCAmelCase`: the tokenizer class in this file calls it as
    `get_pairs(...)`. The obfuscated locals (`pairs`, `prev_char`) were also
    undefined and are restored here; an empty `word` now yields an empty set
    instead of an IndexError.
    """
    if not word:
        return set()
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class UpperCAmelCase_ ( PreTrainedTokenizer ):
    """CTRL BPE tokenizer (word-level BPE with '@@' continuation markers).

    The obfuscated original gave every method the same name (`_lowerCamelCase`),
    repeated parameter names (a SyntaxError), collapsed most locals, and based
    the class on the undefined name `snake_case`. Names are restored from the
    `PreTrainedTokenizer` contract and the in-file call sites (`self.bpe`,
    `get_pairs`, `logger`, `VOCAB_FILES_NAMES`).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            # First line is a header, last line is empty after the trailing newline.
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder)

    def get_vocab(self):
        """Return the full token -> id mapping, including added tokens."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair merges to one whitespace-delimited token; memoized in self.cache."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        # Mark the last symbol with the end-of-word sentinel used by the merges table.
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the highest-priority (lowest-rank) pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        # Join sub-units with the '@@ ' continuation marker and drop the '</w>' sentinel.
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string into BPE sub-tokens."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id, falling back to the unk token's id."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an id to a token (str), falling back to the unk token."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Join sub-tokens back into a plain string, removing '@@ ' markers."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write vocab.json and merges.txt into `save_directory`; return their paths."""
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 76 | 1 |
def solution(n=1000):
    """Project Euler problem 1: sum of all multiples of 3 or 5 strictly below `n`.

    Renamed from the obfuscated `_lowerCamelCase` so the `__main__` guard's
    existing `solution()` call resolves; the body's own `while a < n` grounds
    the parameter name. The original's `elif a % 15 == 0: result -= a` branch
    was unreachable (a multiple of 15 is already a multiple of 3) and is dropped.

    >>> solution(10)
    23
    """
    return sum(a for a in range(3, n) if a % 3 == 0 or a % 5 == 0)


if __name__ == "__main__":
    print(f'{solution() = }')
| 713 |
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
    # NOTE(fix): the obfuscated source assigned every result to the same name
    # (`__lowercase`) while later lines read `X`, `young`, `union`, ... — all
    # undefined. The original variable names are restored from those reads.

    # Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # max-min composition
    # max-product composition

    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()

    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title("Young")
    plt.grid(True)

    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title("Middle aged")
    plt.grid(True)

    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title("union")
    plt.grid(True)

    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title("intersection")
    plt.grid(True)

    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title("complement_a")
    plt.grid(True)

    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title("difference a/b")
    plt.grid(True)

    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title("alg_sum")
    plt.grid(True)

    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title("alg_product")
    plt.grid(True)

    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title("bdd_sum")
    plt.grid(True)

    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title("bdd_difference")
    plt.grid(True)

    plt.subplots_adjust(hspace=0.5)
    plt.show()
| 563 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import structure: the configuration is always importable; the modeling
# symbols are registered only when torch is available. (The obfuscated source
# bound every assignment to `_SCREAMING_SNAKE_CASE`, leaving `_import_structure`
# undefined at its use below, and imported the misspelled `NllbMoeTopaRouter`.)
_import_structure = {
    "configuration_nllb_moe": [
        "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "NllbMoeConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nllb_moe"] = [
        "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NllbMoeForConditionalGeneration",
        "NllbMoeModel",
        "NllbMoePreTrainedModel",
        "NllbMoeTop2Router",
        "NllbMoeSparseMLP",
    ]

if TYPE_CHECKING:
    from .configuration_nllb_moe import (
        NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
        NllbMoeConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nllb_moe import (
            NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
            NllbMoeForConditionalGeneration,
            NllbMoeModel,
            NllbMoePreTrainedModel,
            NllbMoeSparseMLP,
            NllbMoeTop2Router,  # was `NllbMoeTopaRouter`: the modeling module exports NllbMoeTop2Router
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy dependencies load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 401 |
"""simple docstring"""
def lowercase__(a, b):
    """Return True if `b` can be obtained from `a` by upper-casing some of `a`'s
    lowercase letters and deleting the remaining lowercase letters.

    dp[i][j] is True when the first i characters of `a` can produce the first j
    characters of `b`. The obfuscated signature declared the same parameter name
    twice (a SyntaxError) and collapsed the locals; the parameter names `a`/`b`
    are restored from the body's own references.
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                # Upper-casing a[i] to match b[j] consumes one character of each string.
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                # A lowercase a[i] may instead simply be deleted.
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
    # Run this module's doctests when executed directly as a script.
    import doctest
    doctest.testmod()
| 49 | 0 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue_model_parallelism.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
    ]
)
class UpperCAmelCase(unittest.TestCase):
    """SageMaker model-parallel training integration test.

    The obfuscated original gave all four methods the same name
    (`lowerCAmelCase_`) — so only the last definition survived — and referenced
    the undefined name `_lowerCAmelCase` throughout. Method names are restored
    to the unittest/parameterized contract and the dangling references are bound
    to the in-scope values they shadowed.
    """

    def setUp(self):
        # Copy the plain GLUE script next to the test assets for the non-MP variant.
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,  # was `check=_lowerCAmelCase` (undefined)
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        """Build a HuggingFace estimator configured for smdistributed model parallelism."""
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }
        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,  # was `_lowerCAmelCase` (undefined)
            instance_type=self.instance_type,
            debugger_hook_config=False,  # NOTE(review): upstream disables the debugger hook here — confirm
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        """Export the CloudWatch metrics of `job_name` to a CSV in the test path."""
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 709 | import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger()
def convert_weight_and_push(hidden_sizes, name, config, save_directory, push_to_hub=True):
    """Port one timm LeViT checkpoint into a `LevitForImageClassificationWithTeacher`.

    The obfuscated signature repeated `__lowercase` for every parameter (a
    SyntaxError); the names are restored from the body's own references
    (`name`, `hidden_sizes`, `push_to_hub`, `save_directory`). The weight-copy
    loop also lost its assignment target and is restored.

    Args:
        hidden_sizes: first-stage hidden size, used to select the timm variant.
        name: checkpoint name, e.g. "levit-128S".
        config: LevitConfig for the HF model.
        save_directory: directory the converted checkpoint is written under.
        push_to_hub: when True, save the model and image processor locally.
    """
    print(f"""Converting {name}...""")

    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        # Copy weights positionally: both state dicts enumerate layers in the same order.
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"""Pushed {checkpoint_name}""")
def convert_weights_and_push(save_directory, model_name=None, push_to_hub=True):
    """Convert one named LeViT checkpoint (or all of them) and save the result.

    Renamed from the obfuscated `UpperCamelCase` so the `__main__` guard's
    existing `convert_weights_and_push(...)` call resolves; the duplicated
    parameter names (a SyntaxError) are replaced with distinct ones. Also fixes
    the single-model path, which previously reached `return config` with
    `config` unbound.

    Returns:
        (config, expected_shape) for the last converted model.
    """
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    # Fetch the ImageNet-1k label mapping from the Hub and build id<->label dicts.
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }

    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1,
        ),
    }

    if model_name:
        config = names_to_config[model_name]  # bind `config` so the return below is defined
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    # NOTE(fix): the obfuscated source assigned the parser, the parsed args and
    # the dump path all to `_UpperCAmelCase`, so the reads of `parser`, `args`
    # and `pytorch_dump_folder_path` below were undefined; restored here.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default=None,
        type=str,
        help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""",
        default="""levit-dump-folder/""",
        type=Path,
        required=False,
        help="""Path to the output PyTorch model directory.""",
    )
    parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
    parser.add_argument(
        """--no-push_to_hub""",
        dest="""push_to_hub""",
        action="""store_false""",
        help="""Do not push model and image processor to the hub""",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 70 | 0 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and checkpoint->config-url map (the obfuscated source bound both
# to `_A`, so the logger was immediately shadowed by the dict).
logger = logging.get_logger(__name__)

WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class __UpperCAmelCase ( PretrainedConfig ):
    """Configuration class for WavLM models.

    The obfuscated original named every `__init__` parameter `A_` (a
    SyntaxError), based the class on the undefined name `snake_case__`, and
    carried a stray `| 505 |` artifact on its last line. Parameter names are
    restored from the attribute assignments in the body; the base class is the
    `PretrainedConfig` imported at the top of the file.
    """

    # Framework hook: identifies this config type in AutoConfig / serialized JSON.
    model_type = "wavlm"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        num_buckets=320,
        max_bucket_distance=800,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        # The three convolutional spec lists must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
                f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        """Product of the convolutional strides: input samples per output frame."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
"""simple docstring"""
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
_A = logging.getLogger(__name__)
def parse_args():
    """Parse command-line options for preparing TFRecord shards.

    Returns:
        argparse.Namespace with dataset, tokenizer, sharding and output options.

    Note: the original obfuscated code passed the undefined name ``_snake_case``
    as ``type=``/``default=`` — the intended ``str``/``int``/``None`` values are
    restored here.
    """
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the training. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config",
        type=str,
        default="wikitext-103-raw-v1",
        help="Configuration name of the dataset.",
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size",
        type=int,
        default=1000,
        help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit",
        default=None,
        type=int,
        help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )
    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    """Return a ``datasets.map``-compatible callable tokenizing the "text" column.

    Args:
        tokenizer: a callable (e.g. a HF tokenizer) applied to a batch's texts.

    Returns:
        A function mapping a batch dict to ``tokenizer(batch["text"])``.

    Fixes the obfuscated original, whose inner function referenced the
    undefined names ``tokenizer`` and ``examples``.
    """

    def fn(examples):
        return tokenizer(examples["text"])

    return fn
def get_serialized_examples(tokenized_data):
    """Serialize pre-tokenized samples into TFRecord ``tf.train.Example`` bytes.

    Args:
        tokenized_data: dict with "input_ids" and "attention_mask" lists of
            equal length (one entry per sample).

    Returns:
        list of serialized ``tf.train.Example`` protobuf byte strings.

    Fixes the obfuscated original, which used the non-existent TF names
    ``intaa_list`` / ``tf.train.IntaaList`` (int64_list / Int64List).
    """
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        example = tf.train.Example(features=tf.train.Features(feature=features))
        records.append(example.SerializeToString())
    return records
def main(args):
    """Tokenize a dataset, chunk it to fixed length, and write TFRecord shards.

    Args:
        args: namespace produced by ``parse_args`` (dataset name/config, split,
            tokenizer id, shard size, optional limit, max length, output dir).

    Reconstructed from the obfuscated original, in which every local was
    clobbered to one placeholder name while reads used the intended names
    (``dataset``, ``tokenizer``, ``split_dir``, ...), making it a NameError.
    """
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"""Limiting the dataset to {args.limit} entries.""")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"""dataset-{shard_count}-{records_containing}.tfrecord""")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"""split-{args.split}-records-count.txt""", "w") as f:
        print(f"""Total {args.split} records: {total_records}""", file=f)
if __name__ == "__main__":
    # Parse CLI arguments and run the shard-writing pipeline.
    # Fix: the original called `main(args)` although the parsed namespace was
    # bound to `_A`, leaving `args` undefined at module scope.
    _A = parse_args()
    main(_A)
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class _UpperCamelCase ( nn.Module ):
    """Cross-attention 2D downsampling block: ``num_layers`` (ResNet,
    Transformer) pairs, followed by an optional downsampler. Returns the final
    hidden states and every intermediate state (for UNet skip connections).

    Reconstruction notes: the obfuscated original declared ``__call__`` with
    duplicate parameter names (a SyntaxError), named the setup method ``A__``
    (flax.linen only invokes ``setup``), and never stored the built submodule
    lists on ``self`` although ``__call__`` reads ``self.resnets`` /
    ``self.attentions`` / ``self.downsamplers_a``.
    """

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            # The first ResNet maps in_channels -> out_channels; later ones keep out_channels.
            res_in_channels = self.in_channels if i == 0 else self.out_channels
            resnets.append(
                FlaxResnetBlockaD(
                    in_channels=res_in_channels,
                    out_channels=self.out_channels,
                    dropout_prob=self.dropout,
                    dtype=self.dtype,
                )
            )
            attentions.append(
                FlaxTransformeraDModel(
                    in_channels=self.out_channels,
                    n_heads=self.num_attention_heads,
                    d_head=self.out_channels // self.num_attention_heads,
                    depth=1,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=self.only_cross_attention,
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            )
        self.resnets = resnets
        self.attentions = attentions
        if self.add_downsample:
            self.downsamplers_a = FlaxDownsampleaD(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()
        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_a(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states
class _UpperCamelCase ( nn.Module ):
    """Plain 2D downsampling block: ``num_layers`` ResNet blocks followed by an
    optional downsampler. Returns the final hidden states and every
    intermediate state (for UNet skip connections).

    Reconstruction notes: as with its siblings, the obfuscated original had
    duplicate ``__call__`` parameters (SyntaxError), a setup method named
    ``A__`` (flax.linen only invokes ``setup``), and never assigned the built
    list to ``self.resnets`` although ``__call__`` reads it.
    """

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            res_in_channels = self.in_channels if i == 0 else self.out_channels
            resnets.append(
                FlaxResnetBlockaD(
                    in_channels=res_in_channels,
                    out_channels=self.out_channels,
                    dropout_prob=self.dropout,
                    dtype=self.dtype,
                )
            )
        self.resnets = resnets
        if self.add_downsample:
            self.downsamplers_a = FlaxDownsampleaD(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_a(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states
class _UpperCamelCase ( nn.Module ):
    """Cross-attention 2D upsampling block: consumes skip connections from
    ``res_hidden_states_tuple`` (last-in-first-out), runs (ResNet, Transformer)
    pairs, and optionally upsamples.

    Reconstruction notes: the obfuscated original had duplicate ``__call__``
    parameters (SyntaxError), a setup method named ``A__`` (flax.linen only
    invokes ``setup``), and never stored the submodule lists on ``self``
    although ``__call__`` reads ``self.resnets`` / ``self.attentions`` /
    ``self.upsamplers_a``.
    """

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            # Channel bookkeeping for the concatenated skip connection.
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            resnets.append(
                FlaxResnetBlockaD(
                    in_channels=resnet_in_channels + res_skip_channels,
                    out_channels=self.out_channels,
                    dropout_prob=self.dropout,
                    dtype=self.dtype,
                )
            )
            attentions.append(
                FlaxTransformeraDModel(
                    in_channels=self.out_channels,
                    n_heads=self.num_attention_heads,
                    d_head=self.out_channels // self.num_attention_heads,
                    depth=1,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=self.only_cross_attention,
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            )
        self.resnets = resnets
        self.attentions = attentions
        if self.add_upsample:
            self.upsamplers_a = FlaxUpsampleaD(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_a(hidden_states)
        return hidden_states
class _UpperCamelCase ( nn.Module ):
    """Plain 2D upsampling block: consumes skip connections from
    ``res_hidden_states_tuple`` (last-in-first-out), runs ResNet blocks, and
    optionally upsamples.

    Reconstruction notes: the obfuscated original had duplicate ``__call__``
    parameters (SyntaxError), a setup method named ``A__`` (flax.linen only
    invokes ``setup``), and never assigned the built list to ``self.resnets``
    although ``__call__`` reads it.
    """

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            resnets.append(
                FlaxResnetBlockaD(
                    in_channels=resnet_in_channels + res_skip_channels,
                    out_channels=self.out_channels,
                    dropout_prob=self.dropout,
                    dtype=self.dtype,
                )
            )
        self.resnets = resnets
        if self.add_upsample:
            self.upsamplers_a = FlaxUpsampleaD(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_a(hidden_states)
        return hidden_states
class _UpperCamelCase ( nn.Module ):
    """UNet mid block with cross attention: one leading ResNet block, then
    ``num_layers`` (Transformer, ResNet) pairs, all at ``in_channels`` width.

    Reconstruction notes: the obfuscated original had duplicate ``__call__``
    parameters (SyntaxError), a setup method named ``A__`` (flax.linen only
    invokes ``setup``), and never stored the submodule lists on ``self``
    although ``__call__`` reads ``self.resnets`` / ``self.attentions``.
    """

    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlockaD(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]
        attentions = []
        for _ in range(self.num_layers):
            attentions.append(
                FlaxTransformeraDModel(
                    in_channels=self.in_channels,
                    n_heads=self.num_attention_heads,
                    d_head=self.in_channels // self.num_attention_heads,
                    depth=1,
                    use_linear_projection=self.use_linear_projection,
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            )
            resnets.append(
                FlaxResnetBlockaD(
                    in_channels=self.in_channels,
                    out_channels=self.in_channels,
                    dropout_prob=self.dropout,
                    dtype=self.dtype,
                )
            )
        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        return hidden_states
| 720 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
    """Tests for ``VisionTextDualEncoderProcessor`` (BERT tokenizer + ViT
    image processor).

    Reconstruction notes: in the obfuscated original, all twelve methods were
    named ``A__`` (so only the last survived class creation), call sites
    referenced undefined placeholder names (``__lowercase``), and ``np.uinta``
    is not a NumPy attribute. The method names are restored from the
    self-references in the bodies (``self.get_tokenizer()``,
    ``self.prepare_image_inputs()``, ``self.tmpdirname``, ...).
    """

    def setUp(self):
        # Write a minimal tokenizer vocab and an image-processor config on disk.
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with a single random PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])
        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 422 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
# Module-level logger. Fix: `__call__` below emits `logger.warning(...)`, but
# the obfuscated original only bound the logger to `A`, leaving `logger`
# undefined; expose it under the conventional name as well.
A : Tuple = logging.get_logger(__name__)
logger = A
class _UpperCamelCase ( SequenceFeatureExtractor ):
    """Kaldi-style speech feature extractor: computes log-mel filter-bank
    ("fbank") features via torchaudio's Kaldi compliance module and optionally
    applies utterance-level cepstral mean/variance normalization (CMVN).

    Reconstruction notes: the obfuscated original had duplicate parameter names
    (a SyntaxError), an undefined base class placeholder (the file imports
    ``SequenceFeatureExtractor``), placeholder argument names at call sites,
    assignments that dropped ``self.``, and three methods all named
    ``snake_case`` although the bodies reference ``self._extract_fbank_features``,
    ``self.utterance_cmvn`` and ``self.normalize`` — those names are restored.
    """

    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        # NOTE(review): restored per upstream convention — the last mangled
        # assignment set a bare True; confirm it was `return_attention_mask`.
        self.return_attention_mask = True

    def _extract_fbank_features(self, waveform):
        """Compute Kaldi fbank features for a single mono waveform (np.float32)."""
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(x, input_length, normalize_means=True, normalize_vars=True, padding_value=0.0):
        """Apply utterance-level CMVN to `x` over its first `input_length` frames."""
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)
        if input_length < x.shape[0]:
            # Re-pad the frames past the real length with the padding value.
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(self, input_features, attention_mask=None):
        """CMVN-normalize a batch; real lengths come from the attention mask if given."""
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]

    def __call__(
        self,
        raw_speech,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_tensors=None,
        sampling_rate=None,
        return_attention_mask=None,
        **kwargs,
    ):
        """Featurize one mono waveform or a batch of them into a padded BatchFeature."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})
        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
| 636 |
"""simple docstring"""
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
# NOTE(review): machine-mangled constant — a flag with value "true" whose
# original identifier was lost in obfuscation; confirm the intended name
# before relying on it.
UpperCamelCase__ :Dict = """true"""
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Build a seeded regression model + dataloader and prepare them with `accelerator`.

    Reconstructed from the obfuscated original, whose signature repeated the
    same parameter name three times (a SyntaxError); the name matches the call
    site in `test_torch_metrics`.
    """
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator, use_longest=False):
    """Build the tokenized MRPC validation dataloader.

    Args:
        accelerator: Accelerator whose `main_process_first` guards the map call.
        use_longest: pad to the longest sample in the batch instead of max_length=128.

    Reconstructed from the obfuscated original (duplicate parameter names were
    a SyntaxError); the name matches the call site in `get_mrpc_setup`.
    """
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    """Return {"ddp": [...], "no": [...]} model/dataloader setups plus the accelerator.

    Reconstructed from the obfuscated original (duplicate parameter names were
    a SyntaxError); the name matches the call site in `test_mrpc`.
    """
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    """Run inference over `dataloader`, gather (logit, target) pairs across
    processes, and return them concatenated as two tensors.

    Reconstructed from the obfuscated original (duplicate parameter names were
    a SyntaxError); the name matches the call site in `test_torch_metrics`.
    """
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    """Check `gather_for_metrics` returns exactly `num_samples` predictions.

    Reconstructed from the obfuscated original (duplicate parameter names were
    a SyntaxError); the name matches the call sites in `main`.
    """
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(logits)}"
def test_mrpc(dispatch_batches=False, split_batches=False):
    """Compare single-process ("no") vs distributed ("ddp") MRPC metrics; they
    must agree for both accuracy and f1.

    Reconstructed from the obfuscated original (duplicate parameter names were
    a SyntaxError); the name matches the call site in `main`.
    """
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    """Run the full metrics test-suite across split/dispatch batching modes.

    Reconstructed from the obfuscated original; the name matches the call
    sites in `_mp_fn` and the `__main__` guard.
    """
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
def _mp_fn(index):
    """Per-process entry point for `xla_spawn` (TPUs); `index` is the ordinal."""
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    # Script entry point: run the full metrics test-suite.
    main()
| 355 | 0 |
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : int = logging.get_logger(__name__)
# Fix: `from_pretrained` below calls `logger.warning(...)`, but the obfuscated
# original bound the logger only to `_A` and then immediately rebound `_A` to
# the archive map — expose the logger under its conventional name first.
logger = _A

# Map of checkpoint names to their hosted config files.
_A : Dict = {
    'kakaobrain/align-base': 'https://huggingface.co/kakaobrain/align-base/resolve/main/config.json',
}
class __SCREAMING_SNAKE_CASE ( PretrainedConfig ):
    """Configuration for the ALIGN text encoder (BERT-style).

    Reconstruction notes: the obfuscated original declared every ``__init__``
    parameter with the same name ``A`` (a SyntaxError), used an undefined base
    class placeholder (the file imports ``PretrainedConfig``), dropped the
    ``self.`` prefix on attribute assignments, and mangled the ``model_type``
    class attribute although ``cls.model_type`` is read in ``from_pretrained``.
    """

    model_type = "align_text_model"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class AlignVisionConfig(PretrainedConfig):
    """Configuration for the ALIGN vision tower (an EfficientNet-style encoder).

    Defaults mirror the vision encoder of ``kakaobrain/align-base``.
    """

    model_type = "align_vision_model"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        # NOTE: the list defaults below are read-only stage descriptors; they
        # are never mutated, so sharing one default object per process is safe.
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        # Each repeated block expands into 4 sub-layers.
        self.num_hidden_layers = sum(num_block_repeats) * 4

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load this config from a checkpoint, unwrapping the ``vision_config`` of a full ALIGN config."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AlignConfig(PretrainedConfig):
    """Composite ALIGN configuration holding a text and a vision sub-config."""

    model_type = "align"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=640,
        temperature_init_value=1.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info('''text_config is None. Initializing the AlignTextConfig with default values.''')

        if vision_config is None:
            vision_config = {}
            logger.info('''vision_config is None. Initializing the AlignVisionConfig with default values.''')

        # Sub-configs are always stored as config objects, never raw dicts.
        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config: "AlignTextConfig", vision_config: "AlignVisionConfig", **kwargs):
        """Build an ``AlignConfig`` from two already-constructed sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 130 |
def _a ( UpperCAmelCase , UpperCAmelCase ) -> str:
"""simple docstring"""
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
lowerCamelCase__ : int = str(bin(UpperCAmelCase ) )[2:] # remove the leading "0b"
lowerCamelCase__ : Optional[int] = str(bin(UpperCAmelCase ) )[2:]
lowerCamelCase__ : str = max(len(UpperCAmelCase ) , len(UpperCAmelCase ) )
return "0b" + "".join(
str(int('''1''' in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(UpperCAmelCase ) , b_binary.zfill(UpperCAmelCase ) ) )
# When executed as a script, run any doctest examples defined in this module.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 130 | 1 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
# Module-level logger: the conversion helpers below log through ``logger``.
logger = logging.get_logger(__name__)

# fairseq parameter-name fragments -> HF Wav2Vec2 parameter paths.
# A "*" is a placeholder for the encoder-layer index, filled in at load time.
MAPPING = {
    'post_extract_proj': 'feature_projection.projection',
    'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
    'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
    'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
    'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
    'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
    'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
    'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
    'fc2': 'encoder.layers.*.feed_forward.output_dense',
    'final_layer_norm': 'encoder.layers.*.final_layer_norm',
    'encoder.layer_norm': 'encoder.layer_norm',
    'adapter_layer': 'encoder.layers.*.adapter_layer',
    'w2v_model.layer_norm': 'feature_projection.layer_norm',
    'quantizer.weight_proj': 'quantizer.weight_proj',
    'quantizer.vars': 'quantizer.codevectors',
    'project_q': 'project_q',
    'final_proj': 'project_hid',
    'w2v_encoder.proj': 'lm_head',
    'mask_emb': 'masked_spec_embed',
    'pooling_layer.linear': 'projector',
    'pooling_layer.projection': 'classifier',
}
# HF parameter names that live at the top level of the model; all other mapped
# names get the "wav2vec2." prefix when copied (see load_wavaveca_layer).
TOP_LEVEL_KEYS = [
    'lm_head',
    'quantizer.weight_proj',
    'quantizer.codevectors',
    'project_q',
    'project_hid',
    'projector',
    'classifier',
]
def read_txt_into_dict(filename):
    """Read a label file and return ``{line_number: first_token}``.

    Each non-empty line contributes one entry mapping its zero-based line index
    to the first whitespace-separated token on the line (used as ``id2label``).
    Blank lines are skipped but still consume a line number.
    """
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy one fairseq tensor ``value`` into the HF module reached by walking ``key``.

    ``full_name`` is the original fairseq name, used for error/log messages and
    to detect adapter parameters via ``PARAM_MAPPING``; ``weight_type`` selects
    which attribute of the target module receives the data.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    # Adapter weights ("W_a", "ln_W", ...) are addressed by a sub-path instead
    # of a plain weight/bias attribute.
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f' {value.shape} for {full_name}'
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def rename_dict(key, value, full_name, weight_type, hf_dict):
    """Record one converted tensor in ``hf_dict`` under its final HF key.

    Mirrors ``set_recursively`` but targets a plain state dict instead of a
    live model: "lm_head" tensors are stored whole, every other tensor drops
    its leading singleton dimension.
    """
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if '''lm_head''' in full_key else value[0]
# Adapter-layer parameter names (fairseq) -> HF submodule attribute paths.
# Referenced as ``PARAM_MAPPING`` by set_recursively/rename_dict above.
PARAM_MAPPING = {
    'W_a': 'linear_1.weight',
    'W_b': 'linear_2.weight',
    'b_a': 'linear_1.bias',
    'b_b': 'linear_2.bias',
    'ln_W': 'norm.weight',
    'ln_b': 'norm.bias',
}
def load_wavaveca_layer(name, value, hf_model=None, hf_dict=None):
    """Try to map one fairseq tensor onto the HF model (or into ``hf_dict``).

    Returns True when ``name`` matched an entry of ``MAPPING`` and the tensor
    was copied, False otherwise; the caller collects unmatched names.
    """
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split('''w2v_model.''')[-1] == name.split('''.''')[0]:
            is_used = True
            if "*" in mapped_key:
                # Fill in the encoder-layer index, e.g. "encoder.layers.3...".
                layer_index = name.split(key)[0].split('''.''')[-2]
                mapped_key = mapped_key.replace('''*''', layer_index)
            if "weight_g" in name:
                weight_type = '''weight_g'''
            elif "weight_v" in name:
                weight_type = '''weight_v'''
            elif "bias" in name:
                weight_type = '''bias'''
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = '''weight'''
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            # First matching mapping entry wins; stop searching.
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    """Copy every tensor of ``fairseq_model`` into ``hf_model``, warning about leftovers.

    NOTE(review): ``is_headless`` is accepted for call-site compatibility but is
    not consulted in this body — confirm whether head weights need special-casing.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wavaveca.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == '''group''',
            )
            is_used = True
        else:
            is_used = load_wavaveca_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f'Unused weights: {unused_weights}')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one convolutional feature-extractor tensor into ``feature_extractor``.

    ``full_name`` looks like ``...conv_layers.<layer_id>.<type_id>...``:
    type_id 0 is the convolution itself, type_id 2 its normalization layer
    (only layer 0 has one when group norm is used). Unrecognized names are
    appended to ``unused_weights``.
    """
    name = full_name.split('''conv_layers.''')[-1]
    items = name.split('''.''')
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.'
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    """Convert a fairseq wav2vec2 checkpoint into the HF format and save it.

    Supports three modes: sequence-classification heads (``is_seq_class``),
    CTC fine-tuned models (``is_finetuned``), and headless pretraining models.
    """
    if config_path is not None:
        config = WavaVecaConfig.from_pretrained(config_path)
    else:
        config = WavaVecaConfig()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wavavec = WavaVecaForSequenceClassification(config)
        feature_extractor = WavaVecaFeatureExtractor(
            feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, '''vocab.json''')
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, '''w''', encoding='''utf-8''') as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token='''|''',
                do_lower_case=False,
            )
            # Layer-normed feature extractors were trained with attention masks.
            return_attention_mask = True if config.feat_extract_norm == '''layer''' else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16_000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wavavec = WavaVecaForCTC(config)
    else:
        hf_wavavec = WavaVecaForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''')[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task='''audio_pretraining''')
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wavavec, not is_finetuned)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    parser.add_argument(
        '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
    )
    parser.add_argument(
        '--is_seq_class',
        action='store_true',
        help='Whether the model to convert is a fine-tuned sequence classification model or not',
    )
    args = parser.parse_args()

    # A checkpoint is treated as fine-tuned unless explicitly flagged otherwise;
    # sequence-classification checkpoints are handled as their own mode.
    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
| 94 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
"""microsoft/beit-base-patch16-224-pt22k""": (
"""https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"""
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    """BEiT model configuration (image encoder with optional segmentation heads)."""

    model_type = 'beit'

    def __init__(
        self,
        vocab_size=8_192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        # NOTE: the list defaults are read-only descriptors (never mutated).
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        """All values default to the BEiT-base/16-224 (pt22k) configuration."""
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class BeitOnnxConfig(OnnxConfig):
    """ONNX export configuration for BEiT (single pixel-value input)."""

    # Minimum torch version required to export this model to ONNX.
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self):
        """Dynamic-axis specification for the model's single image input."""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Tolerance when comparing PyTorch outputs against the exported graph.
        return 1e-4
| 453 | 0 |
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE : int = get_tests_dir("""fixtures/test_sentencepiece_bpe_char.model""")
@require_sentencepiece
@require_tokenizers
class A_ ( a_ , unittest.TestCase ):
    """Tokenizer test-suite for ``SpeechTaTokenizer`` built on a char-level SentencePiece fixture.

    NOTE(review): the base ``a_`` is undefined as written — presumably the
    ``TokenizerTesterMixin`` imported above; confirm against upstream.
    """

    # Tester-mixin knobs: class under test, no Rust tokenizer, test SP backend.
    _SCREAMING_SNAKE_CASE = SpeechTaTokenizer
    _SCREAMING_SNAKE_CASE = False
    _SCREAMING_SNAKE_CASE = True

    def _UpperCAmelCase ( self : int ):
        """Build a tokenizer from the SentencePiece fixture, extend it, and save to tmpdir."""
        super().setUp()

        # We have a SentencePiece fixture for testing
        # NOTE(review): ``__SCREAMING_SNAKE_CASE`` below is name-mangled inside this
        # class and undefined — presumably the SAMPLE_VOCAB path above; confirm.
        __a = SpeechTaTokenizer(__SCREAMING_SNAKE_CASE )

        __a = AddedToken("<mask>" , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE )
        __a = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token} )
        tokenizer.add_tokens(["<ctc_blank>"] )

        tokenizer.save_pretrained(self.tmpdirname )

    def _UpperCAmelCase ( self : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] ):
        """Return an (input, expected output) text pair for round-trip tests."""
        __a = "this is a test"
        __a = "this is a test"
        return input_text, output_text

    def _UpperCAmelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Any=20 , __SCREAMING_SNAKE_CASE : int=5 ):
        """Encode/decode a clean sequence with the given tokenizer; returns (text, ids)."""
        __a , __a = self.get_input_output_texts(__SCREAMING_SNAKE_CASE )
        __a = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
        __a = tokenizer.decode(__SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE )
        return text, ids

    def _UpperCAmelCase ( self : Optional[Any] ):
        """``<pad>`` maps to id 1 and back."""
        __a = "<pad>"
        __a = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )

    def _UpperCAmelCase ( self : Optional[int] ):
        """Spot-check the vocabulary's ordering and total size (incl. added tokens)."""
        __a = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0] , "<s>" )
        self.assertEqual(vocab_keys[1] , "<pad>" )
        self.assertEqual(vocab_keys[-4] , "œ" )
        self.assertEqual(vocab_keys[-2] , "<mask>" )
        self.assertEqual(vocab_keys[-1] , "<ctc_blank>" )
        self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 81 )

    def _UpperCAmelCase ( self : str ):
        """Base vocab size (without the tokens added in setUp)."""
        self.assertEqual(self.get_tokenizer().vocab_size , 79 )

    def _UpperCAmelCase ( self : List[Any] ):
        """Adding regular and special tokens grows the vocab and the new ids are usable."""
        __a = self.get_tokenizers(do_lower_case=__SCREAMING_SNAKE_CASE )
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                __a = tokenizer.vocab_size
                __a = len(__SCREAMING_SNAKE_CASE )

                self.assertNotEqual(__SCREAMING_SNAKE_CASE , 0 )

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                __a = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                __a = tokenizer.add_tokens(__SCREAMING_SNAKE_CASE )
                __a = tokenizer.vocab_size
                __a = len(__SCREAMING_SNAKE_CASE )

                self.assertNotEqual(__SCREAMING_SNAKE_CASE , 0 )
                self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
                self.assertEqual(__SCREAMING_SNAKE_CASE , len(__SCREAMING_SNAKE_CASE ) )
                self.assertEqual(__SCREAMING_SNAKE_CASE , all_size + len(__SCREAMING_SNAKE_CASE ) )

                __a = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=__SCREAMING_SNAKE_CASE )

                self.assertGreaterEqual(len(__SCREAMING_SNAKE_CASE ) , 4 )
                self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )

                __a = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                __a = tokenizer.add_special_tokens(__SCREAMING_SNAKE_CASE )
                __a = tokenizer.vocab_size
                __a = len(__SCREAMING_SNAKE_CASE )

                self.assertNotEqual(__SCREAMING_SNAKE_CASE , 0 )
                self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
                self.assertEqual(__SCREAMING_SNAKE_CASE , len(__SCREAMING_SNAKE_CASE ) )
                self.assertEqual(__SCREAMING_SNAKE_CASE , all_size_a + len(__SCREAMING_SNAKE_CASE ) )

                __a = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=__SCREAMING_SNAKE_CASE )

                self.assertGreaterEqual(len(__SCREAMING_SNAKE_CASE ) , 6 )
                self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[0] , tokens[1] )
                self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3] , tokens[-4] )
                self.assertEqual(tokens[0] , tokenizer.eos_token_id )
                self.assertEqual(tokens[-3] , tokenizer.pad_token_id )

    def _UpperCAmelCase ( self : List[Any] ):
        # Intentionally skipped (not applicable to this tokenizer).
        pass

    def _UpperCAmelCase ( self : Union[str, Any] ):
        # Intentionally skipped (not applicable to this tokenizer).
        pass

    def _UpperCAmelCase ( self : Dict ):
        """Character-level tokenization, id conversion, and unknown-token handling."""
        __a = self.get_tokenizer()

        __a = tokenizer.tokenize("This is a test" )
        # fmt: off
        self.assertListEqual(__SCREAMING_SNAKE_CASE , [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"] )
        # fmt: on

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )

        __a = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            __SCREAMING_SNAKE_CASE , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
        __a = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
        # fmt: off
        self.assertListEqual(__SCREAMING_SNAKE_CASE , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
        # fmt: on

        __a = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
        self.assertListEqual(
            __SCREAMING_SNAKE_CASE , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )

    @slow
    def _UpperCAmelCase ( self : Optional[int] ):
        """Integration test: full encodings against the published microsoft/speecht5_asr checkpoint."""
        # Use custom sequence because this tokenizer does not handle numbers.
        __a = [
            "Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
            "general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
            "Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
            "models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
            "BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
            "conditioning on both left and right context in all layers.",
            "The quick brown fox jumps over the lazy dog.",
        ]

        # fmt: off
        __a = {
            "input_ids": [
                [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
                [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
            ],
            "attention_mask": [
                [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            ]
        }
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=__SCREAMING_SNAKE_CASE , model_name="microsoft/speecht5_asr" , revision="c5ef64c71905caeccde0e4462ef3f9077224c524" , sequences=__SCREAMING_SNAKE_CASE , )
from ... import PretrainedConfig
# Canonical pretrained-config location(s) for NEZHA; the config class below
# references this constant by name.
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """sijunhe/nezha-cn-base""": """https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json""",
}
# Backward-compatible alias for the previous (generated) constant name.
SCREAMING_SNAKE_CASE : Any = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
class A_ ( PretrainedConfig ):
    """
    Configuration for NEZHA models (e.g. ``sijunhe/nezha-cn-base``): stores the
    hyper-parameters that define the model architecture.

    Fixes (review): the base class was the undefined name ``a_`` (restored to
    the ``PretrainedConfig`` imported above); both class attributes were bound
    to one shadowing name; ``__init__`` reused a single parameter name many
    times (a SyntaxError) and assigned every value to a throwaway local —
    parameter/attribute names are restored from the assignment targets used in
    the body.
    """

    # Checkpoint-name -> config-URL map, bound at module level above.
    pretrained_config_archive_map = SCREAMING_SNAKE_CASE
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        """Store the architecture hyper-parameters; token ids and any extra
        keyword arguments are forwarded to ``PretrainedConfig``."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
| 525 | 1 |
'''simple docstring'''
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class UpperCAmelCase ( list ):
    """A pile (stack) of cards for patience sort: a list compared by its last
    element, so ``bisect_left`` can find the leftmost pile whose top is >= a
    new element.

    Fixes (review): the base class was the undefined name
    ``_SCREAMING_SNAKE_CASE`` (the class is indexed like a list, so ``list``
    is restored) and both comparison methods read ``other`` although the
    parameter had been renamed.
    """

    def __lt__(self, other):
        # Piles are ordered by their top (last) card.
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def __lowerCAmelCase(a_) -> list:
    """Patience-sort ``a_`` in place and return it.

    Fixes (review): the obfuscated body appended the whole collection instead
    of the current element / new pile, passed the wrong iterable to
    ``reversed`` and never wrote the merged result back.
    """
    stacks: list[UpperCAmelCase] = []
    # Deal each element onto the leftmost pile whose top card is >= element,
    # starting a new pile when none qualifies.
    for element in a_:
        new_stack = UpperCAmelCase([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)
    # Each pile is descending, so reverse them and k-way merge with a heap.
    a_[:] = merge(*(reversed(stack) for stack in stacks))
    return a_
if __name__ == "__main__":
    # NOTE(review): both locals below are bound to the same mangled name
    # ``_lowerCAmelCase`` while the later lines read ``user_input`` and
    # ``unsorted``, and ``patience_sort`` is not defined under that name in
    # this file — this entry point raises NameError as written.
    _lowerCAmelCase :Optional[int] = input("""Enter numbers separated by a comma:\n""").strip()
    _lowerCAmelCase :Optional[int] = [int(item) for item in user_input.split(""",""")]
    print(patience_sort(unsorted))
| 251 | '''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def __lowerCAmelCase(number: int) -> bool:
    """Return True iff ``number`` is a perfect square.

    Fixes (review): the parameter had been renamed while the body read
    ``number``/``sq``; ``int(number ** 0.5)`` is replaced with ``math.isqrt``
    which is exact for arbitrarily large integers, and negative inputs now
    return False instead of producing a complex root.
    """
    from math import isqrt  # local import: the module header only pulls in gcd/sqrt

    if number < 0:
        return False
    sq = isqrt(number)
    return number == sq * sq
def __lowerCAmelCase(x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int) -> tuple[int, int]:
    """Return (numerator, denominator) of x + y + z in lowest terms, where the
    three fractions are given as numerator/denominator pairs.

    Fix (review): the obfuscated signature reused one parameter name six times,
    which is a SyntaxError; the names are restored from their uses in the body.
    """
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)  # reduce to lowest terms
    top //= hcf
    bottom //= hcf
    return top, bottom
def __lowerCAmelCase(order: int = 35) -> int:
    """
    For n in {1, 2, -1, -2}, collect every reduced proper fraction z with
    numerator/denominator <= ``order`` that satisfies z**n = x**n + y**n for
    proper fractions x, y in the same range; return numerator + denominator of
    the sum of the distinct values x + y + z (Project Euler 180).

    Fixes (review): every local in the obfuscated body was assigned to the
    mangled name ``SCREAMING_SNAKE_CASE`` while the surrounding lines read the
    original names (``z_num``, ``hcf``, ``unique_s``, ``total`` ...) — the
    assignment targets are restored from those uses.

    NOTE(review): ``add_three`` and ``is_sq`` are the two helpers defined
    above; in this file both definitions are bound to the mangled name
    ``__lowerCAmelCase``, so the calls below assume the original bindings —
    confirm before running.
    """
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n = 1: z = x + y
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n = 2: z = sqrt(x**2 + y**2), kept only when rational
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n = -1: 1/z = 1/x + 1/y  =>  z = x*y / (x + y)
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n = -2: 1/z**2 = 1/x**2 + 1/y**2, kept only when rational
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)
    return total.denominator + total.numerator
if __name__ == "__main__":
    # NOTE(review): the definition above is bound to ``__lowerCAmelCase``, so
    # ``solution`` is undefined and this entry point raises NameError as written.
    print(f"""{solution() = }""")
| 251 | 1 |
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
lowercase_ = pytest.mark.integration
# NOTE(review): every constant below is bound to the same mangled name
# ``lowercase_`` — each assignment shadows the previous one — while the
# functions later in this module read the original names (REQUIRE_FAIRSEQ,
# _has_fairseq, UNSUPPORTED_ON_WINDOWS, _on_windows, REQUIRE_TRANSFORMERS,
# _has_transformers), which are therefore undefined. Confirm before running.
lowercase_ = {"comet"}
lowercase_ = importlib.util.find_spec("fairseq") is not None
lowercase_ = {"code_eval"}
lowercase_ = os.name == "nt"
lowercase_ = {"bertscore", "frugalscore", "perplexity"}
lowercase_ = importlib.util.find_spec("transformers") is not None
def __lowerCAmelCase(test_case):
    """Decorator: skip the wrapped metric test when fairseq is not installed
    and the metric requires it; otherwise run the test.

    Fix (review): the outer and inner parameters had been mangled so the body
    read the undefined names ``test_case`` and ``metric_name`` — the parameter
    names are restored from those uses. ``_has_fairseq``/``REQUIRE_FAIRSEQ``
    are module globals (currently bound to a mangled name — see note above).
    """
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest("\"test requires Fairseq\"")
        else:
            test_case(self, metric_name)

    return wrapper
def __lowerCAmelCase(test_case):
    """Decorator: skip the wrapped metric test when transformers is not
    installed and the metric requires it; otherwise run the test.

    Fix (review): parameters had been mangled so the body read the undefined
    names ``test_case`` and ``metric_name``; restored from those uses.
    """
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest("\"test requires transformers\"")
        else:
            test_case(self, metric_name)

    return wrapper
def __lowerCAmelCase(test_case):
    """Decorator: skip the wrapped metric test on Windows when the metric is
    unsupported there; otherwise run the test.

    Fix (review): parameters had been mangled so the body read the undefined
    names ``test_case`` and ``metric_name``; restored from those uses.
    """
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest("\"test not supported on Windows\"")
        else:
            test_case(self, metric_name)

    return wrapper
def __lowerCAmelCase():
    """Return parameterized-test descriptors, one per metric directory under
    ./metrics/ (relative to the current working directory).

    Fix (review): the directory list was assigned to a mangled local while the
    second comprehension read ``metrics``; the name is restored.
    """
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
# NOTE(review): ``get_local_metric_names`` and the three skip-decorators
# passed to ``for_all_test_methods`` are bound to mangled names in this file
# (``__lowerCAmelCase``), so these decorators raise NameError as written —
# restore the original bindings before running.
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
    __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@local
class SCREAMING_SNAKE_CASE__ ( parameterized.TestCase ):
    """Parameterized runner that executes the doctests of every local metric
    under ./metrics/, with network/model-heavy calls patched out.

    NOTE(review): all methods below are bound to the same mangled name
    ``snake_case__`` (each shadows the previous), and several assign results
    to mangled locals that later lines read under original names — treat the
    original bindings as the source of truth.
    """

    # NOTE(review): both class attributes are bound to ``A`` (the second
    # shadows the first); originally a patcher registry dict and a metric name.
    A : List[Any] = {}
    A : Tuple = None
    @pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
    @pytest.mark.filterwarnings("""ignore:load_metric is deprecated:FutureWarning""" )
    def snake_case__ ( self : List[Any] , _lowerCAmelCase : Optional[int] ):
        """Import the metric module, check ``_compute`` takes no **kwargs and
        run its doctests with intensive calls patched out."""
        __snake_case : List[str] = """[...]"""
        __snake_case : Optional[int] = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("""metrics""" , _lowerCAmelCase ) ).module_path )
        __snake_case : List[Any] = datasets.load.import_main_class(metric_module.__name__ , dataset=_lowerCAmelCase )
        # check parameters
        __snake_case : Dict = inspect.signature(metric._compute ).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
        # run doctest
        with self.patch_intensive_calls(_lowerCAmelCase , metric_module.__name__ ):
            with self.use_local_metrics():
                try:
                    __snake_case : Union[str, Any] = doctest.testmod(_lowerCAmelCase , verbose=_lowerCAmelCase , raise_on_error=_lowerCAmelCase )
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1] # raise the exception that doctest caught
        self.assertEqual(results.failed , 0 )
        self.assertGreater(results.attempted , 1 )
    @slow
    def snake_case__ ( self : Optional[Any] , _lowerCAmelCase : List[str] ):
        """Slow variant: run the metric's doctests without patching."""
        __snake_case : int = """[...]"""
        __snake_case : Any = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("""metrics""" , _lowerCAmelCase ) ).module_path )
        # run doctest
        with self.use_local_metrics():
            __snake_case : List[Any] = doctest.testmod(_lowerCAmelCase , verbose=_lowerCAmelCase , raise_on_error=_lowerCAmelCase )
        self.assertEqual(results.failed , 0 )
        self.assertGreater(results.attempted , 1 )
    @contextmanager
    def snake_case__ ( self : List[str] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[str] ):
        """Apply the registered intensive-calls patcher for a metric, if any.

        NOTE(review): the signature reuses one mangled parameter name, which is
        a SyntaxError, while the body reads ``metric_name``.
        """
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](_lowerCAmelCase ):
                yield
        else:
            yield
    @contextmanager
    def snake_case__ ( self : str ):
        """Redirect ``datasets.load_metric`` to the local ./metrics directory.

        NOTE(review): the nested ``def`` below also reuses one mangled
        parameter name (a SyntaxError).
        """
        def load_local_metric(_lowerCAmelCase : Optional[Any] , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : str ):
            return load_metric(os.path.join("""metrics""" , _lowerCAmelCase ) , *_lowerCAmelCase , **_lowerCAmelCase )
        with patch("""datasets.load_metric""" ) as mock_load_metric:
            __snake_case : Optional[int] = load_local_metric
            yield
    @classmethod
    def snake_case__ ( cls : List[Any] , _lowerCAmelCase : Tuple ):
        """Decorator factory that registers an intensive-calls patcher for a
        metric name; the wrapped function is turned into a context manager."""
        def wrapper(_lowerCAmelCase : Dict ):
            __snake_case : Union[str, Any] = contextmanager(_lowerCAmelCase )
            __snake_case : Optional[Any] = patcher
            return patcher
        return wrapper
@LocalMetricTest.register_intensive_calls_patcher("""bleurt""" )
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Tuple ):
    """Patch bleurt so its doctests run without downloading a checkpoint: the
    predictor is replaced with a stub returning fixed scores.

    NOTE(review): ``LocalMetricTest`` is not defined under that name in this
    file (the test class was renamed), so the decorator raises NameError as
    written; ``tensorflow.compat.va`` is presumably the mangled form of
    ``tensorflow.compat.v2`` — confirm.
    """
    import tensorflow.compat.va as tf
    from bleurt.score import Predictor
    tf.flags.DEFINE_string("""sv""" , """""" , """""" ) # handle pytest cli flags
    # NOTE(review): base class and method read mangled/undefined names
    # (``__UpperCamelCase`` was presumably ``Predictor``, and the method body
    # reads ``input_dict`` although the parameter is bound to another name).
    class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
        def snake_case__ ( self : Any , _lowerCAmelCase : List[Any] ):
            assert len(input_dict["""input_ids"""] ) == 2
            return np.array([1.03, 1.04] )
    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("""bleurt.score._create_predictor""" ) as mock_create_predictor:
        __snake_case : Tuple = MockedPredictor()
        yield
@LocalMetricTest.register_intensive_calls_patcher("""bertscore""" )
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Tuple ):
    """Patch bertscore so its doctests run without downloading a BERT model:
    model loading is mocked out and scoring returns constant values.

    NOTE(review): ``LocalMetricTest`` is bound to a mangled name in this file,
    so the decorator raises NameError as written; the nested ``def`` below
    also reuses a single mangled parameter name, which is a SyntaxError.
    """
    import torch
    def bert_cos_score_idf(__SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , *__SCREAMING_SNAKE_CASE : Dict , **__SCREAMING_SNAKE_CASE : Any ):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(__SCREAMING_SNAKE_CASE ) )
    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("""bert_score.scorer.get_model""" ), patch(
        """bert_score.scorer.bert_cos_score_idf""" ) as mock_bert_cos_score_idf:
        __snake_case : Dict = bert_cos_score_idf
        yield
@LocalMetricTest.register_intensive_calls_patcher("""comet""" )
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : List[Any] ):
    """Patch comet so its doctests run offline: model download is stubbed and
    checkpoint loading returns a dummy model producing fixed scores.

    NOTE(review): ``LocalMetricTest`` is bound to a mangled name in this file,
    so the decorator raises NameError as written; inside, the dummy class was
    renamed while ``return Model()`` reads the original name, the method body
    reads ``scores`` after assigning to a mangled local, and the nested method
    reuses a single mangled parameter name (a SyntaxError).
    """
    def load_from_checkpoint(__SCREAMING_SNAKE_CASE : int ):
        class SCREAMING_SNAKE_CASE__ :
            def snake_case__ ( self : str , _lowerCAmelCase : Dict , *_lowerCAmelCase : str , **_lowerCAmelCase : List[str] ):
                assert len(_lowerCAmelCase ) == 2
                __snake_case : Any = [0.19, 0.92]
                return scores, sum(_lowerCAmelCase ) / len(_lowerCAmelCase )
        return Model()
    # mock load_from_checkpoint which is supposed to do download a bert model
    # mock load_from_checkpoint which is supposed to do download a bert model
    with patch("""comet.download_model""" ) as mock_download_model:
        __snake_case : Dict = None
        with patch("""comet.load_from_checkpoint""" ) as mock_load_from_checkpoint:
            __snake_case : List[Any] = load_from_checkpoint
            yield
def __lowerCAmelCase ( ):
    """Regression test: seqeval must raise with a clear message when given an
    unknown tagging scheme.

    NOTE(review): the three locals below are bound to mangled names while the
    f-string reads ``wrong_scheme`` and the call reads ``metric``, and
    ``pytest.raises`` is passed the undefined ``__SCREAMING_SNAKE_CASE``
    where an exception type / the scheme were expected — this test raises
    NameError as written; restore the original bindings before use.
    """
    __snake_case : str = load_metric(os.path.join("""metrics""" , """seqeval""" ) )
    __snake_case : Optional[Any] = """ERROR"""
    __snake_case : Union[str, Any] = F'''Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}'''
    with pytest.raises(__SCREAMING_SNAKE_CASE , match=re.escape(__SCREAMING_SNAKE_CASE ) ):
        metric.compute(predictions=[] , references=[] , scheme=__SCREAMING_SNAKE_CASE )
| 721 | import numpy as np
def __lowerCAmelCase(vector: np.array):
    """Element-wise hyperbolic tangent, via the logistic identity
    tanh(x) = 2 * sigmoid(2x) - 1.

    Fix (review): the parameter had been renamed while the body still read
    ``vector``; the name is restored.
    """
    return (2 / (1 + np.exp(-2 * vector))) - 1
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 390 | 0 |
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
# NOTE(review): the three constants below are all bound to the same mangled
# name ``lowercase__`` (each assignment shadows the previous one); the
# functions later in this module read the original names ``ETAOIN`` and
# ``LETTERS``, which are therefore undefined — confirm before running.
lowercase__ = {
    """E""": 12.70,
    """T""": 9.06,
    """A""": 8.17,
    """O""": 7.51,
    """I""": 6.97,
    """N""": 6.75,
    """S""": 6.33,
    """H""": 6.09,
    """R""": 5.99,
    """D""": 4.25,
    """L""": 4.03,
    """C""": 2.78,
    """U""": 2.76,
    """M""": 2.41,
    """W""": 2.36,
    """F""": 2.23,
    """G""": 2.02,
    """Y""": 1.97,
    """P""": 1.93,
    """B""": 1.29,
    """V""": 0.98,
    """K""": 0.77,
    """J""": 0.15,
    """X""": 0.15,
    """Q""": 0.10,
    """Z""": 0.07,
}
# English letters ordered by typical frequency (most common first).
lowercase__ = """ETAOINSHRDLCUMWFGYPBVKJXQZ"""
lowercase__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def __lowerCamelCase(message: str) -> dict[str, int]:
    """Return a dict mapping each uppercase ASCII letter to the number of times
    it occurs in ``message`` (case-insensitive; non-letters are ignored).

    Fixes (review): the parameter had been renamed while the body read
    ``message``, and the module constant ``LETTERS`` is bound to a mangled
    name — ``string.ascii_uppercase`` (the same 26-letter literal as the
    constant above) is used instead.
    """
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in string.ascii_uppercase:
            letter_count[letter] += 1
    return letter_count
def __lowerCamelCase(x) -> str:
    """Sort key helper: return the first element of ``x`` (used on
    (frequency, letters) pairs).

    Fix (review): the parameter had been renamed while the body still read
    ``x``; the name is restored.
    """
    return x[0]
def __lowerCamelCase(message: str) -> str:
    """Return the 26 uppercase letters ordered from most to least frequent in
    ``message``; ties inside one frequency bucket are broken by reverse ETAOIN
    position (the classic frequency-analysis recipe).

    Fixes (review): the parameter had been renamed while the body read
    ``message``; the sibling letter-counting helper and the module constants
    are bound to mangled names, so the counting is inlined and the ETAOIN
    order is a local constant; the mangled ``reverse=``/``key=`` arguments
    are restored to ``True`` and a first-element key.
    """
    letters = string.ascii_uppercase  # same 26 letters as the module constant
    etaoin = "ETAOINSHRDLCUMWFGYPBVKJXQZ"

    # Count every ASCII letter, case-insensitively.
    letter_to_freq = {letter: 0 for letter in letters}
    for letter in message.upper():
        if letter in letters:
            letter_to_freq[letter] += 1

    # Bucket letters by their frequency.
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in letters:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    # Inside a bucket, order letters by descending ETAOIN index.
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=etaoin.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    # Highest frequency bucket first.
    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=lambda pair: pair[0], reverse=True)
    return "".join(freq_pair[1] for freq_pair in freq_pairs)
def __lowerCamelCase ( __UpperCamelCase ) -> int:
    """Score (0-12) of how English-like the input's letter-frequency order is:
    +1 for each of ETAOIN's six most common letters appearing among the six
    most frequent, and likewise for the six least common.

    NOTE(review): ``get_frequency_order`` and ``ETAOIN`` are bound to mangled
    names in this file, and the counters below are assigned to mangled locals
    while ``freq_order``/``match_score`` are the names actually read — this
    function raises NameError as written; restore the original bindings
    before use.
    """
    _lowercase : Union[str, Any] = get_frequency_order(__UpperCamelCase )
    _lowercase : Any = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 610 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCamelCase ( A__ ):
    r"""
    Processor that bundles a ViLT image processor and a BERT tokenizer into a
    single callable producing model-ready encodings.

    Fixes (review): in the obfuscated original the three class attributes were
    all bound to one shadowing name, all methods shared a single name, and the
    ``__init__``/``__call__`` signatures reused one parameter name many times
    (a SyntaxError). The conventional ProcessorMixin attribute/method names
    are restored from their uses in the bodies.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # ``feature_extractor`` is the deprecated alias for ``image_processor``;
        # accept it for backward compatibility but warn.
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,  # warning category was mangled in the original — presumably FutureWarning
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text=None,
        add_special_tokens=True,
        padding=False,
        truncation=None,
        max_length=None,
        stride=0,
        pad_to_multiple_of=None,
        return_token_type_ids=None,
        return_attention_mask=None,
        return_overflowing_tokens=False,
        return_special_tokens_mask=False,
        return_offsets_mapping=False,
        return_length=False,
        verbose=True,
        return_tensors=None,
        **kwargs,
    ):
        """Tokenize ``text`` and add ``pixel_values``/``pixel_mask`` computed
        from ``images``; tokenizer options are forwarded unchanged."""
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Union of tokenizer and image-processor input names, de-duplicated
        while preserving order."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        """Deprecated alias for ``image_processor_class``."""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        """Deprecated alias for ``image_processor``."""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 610 | 1 |
'''simple docstring'''
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
# Standard example-script logging: timestamped INFO-level messages.
logging.basicConfig(
    format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
# Module-level logger used by the training / evaluation flow below.
_A : str =logging.getLogger(__name__)
def __UpperCamelCase(outputs, labels) -> int:
    """Return the number of rows of ``outputs`` whose argmax equals ``labels``.

    Fix (review): the obfuscated signature reused one parameter name twice
    (a SyntaxError) while the body read ``outputs``/``labels``; names restored.
    """
    outputs = np.argmax(outputs, axis=1)  # predicted class per row
    return np.sum(outputs == labels)
def __UpperCamelCase(_lowercase) -> list:
    """Parse a ROCStories cloze-test CSV into a list of
    (story, first continuation, second continuation, label) tuples; the label
    column is shifted from {1, 2} to {0, 1}.

    Fixes (review): the reader was built from the path string instead of the
    open file handle, and the output list was assigned to a mangled local
    while the loop appended to ``output``.
    """
    with open(_lowercase, encoding='utf_8') as f:
        reader = csv.reader(f)
        output = []
        next(reader)  # skip the header line
        for line in tqdm(reader):
            # columns 1-4: story sentences; 5-6: candidate continuations; last: label
            output.append((' '.join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def __UpperCamelCase(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token) -> list:
    """
    Turn encoded ROCStories examples into model-ready arrays/tensors.

    For each dataset, builds (input_ids, mc_token_ids, lm_labels, mc_labels)
    where every example is laid out twice — once per candidate continuation —
    as [start] story [delimiter] continuation [clf], each segment truncated to
    ``cap_length`` tokens and the row padded to ``input_len``.

    Fixes (review): the obfuscated signature reused one parameter name six
    times (a SyntaxError) and every local was assigned to the same mangled
    name; names are restored from their uses in the body, and the mangled
    ``np.intaa`` dtype is restored to ``np.int64``.
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        # -100 marks positions ignored by the LM loss.
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for i, (story, cont1, cont2, mc_label) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            # Index of the [clf] token, whose hidden state feeds the MC head.
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
def __UpperCamelCase ( ) -> List[Any]:
    """Fine-tune OpenAIGPTDoubleHeadsModel on the ROCStories cloze task and/or
    evaluate it, driven by command-line arguments.

    NOTE(review): throughout this function every local is assigned to the
    mangled name ``_lowercase`` while later lines read the original names
    (``parser``, ``args``, ``tokenizer``, ``model``, ``optimizer`` ...), and
    several tuple assignments carry an annotation (a SyntaxError). The body is
    kept byte-identical; restore the original bindings before running.
    """
    # ---- argument parsing -------------------------------------------------
    _lowercase : Tuple = argparse.ArgumentParser()
    parser.add_argument('--model_name', type=_lowercase, default='openai-gpt', help='pretrained model name' )
    parser.add_argument('--do_train', action='store_true', help='Whether to run training.' )
    parser.add_argument('--do_eval', action='store_true', help='Whether to run eval on the dev set.' )
    parser.add_argument(
        '--output_dir', default=_lowercase, type=_lowercase, required=_lowercase, help='The output directory where the model predictions and checkpoints will be written.', )
    parser.add_argument('--train_dataset', type=_lowercase, default='' )
    parser.add_argument('--eval_dataset', type=_lowercase, default='' )
    parser.add_argument('--seed', type=_lowercase, default=42 )
    parser.add_argument('--num_train_epochs', type=_lowercase, default=3 )
    parser.add_argument('--train_batch_size', type=_lowercase, default=8 )
    parser.add_argument('--eval_batch_size', type=_lowercase, default=16 )
    parser.add_argument('--adam_epsilon', default=1E-8, type=_lowercase, help='Epsilon for Adam optimizer.' )
    parser.add_argument('--max_grad_norm', type=_lowercase, default=1 )
    parser.add_argument(
        '--max_steps', default=-1, type=_lowercase, help=(
            'If > 0: set total number of training steps to perform. Override num_train_epochs.'
        ), )
    parser.add_argument(
        '--gradient_accumulation_steps', type=_lowercase, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.', )
    parser.add_argument('--learning_rate', type=_lowercase, default=6.2_5E-5 )
    parser.add_argument('--warmup_steps', default=0, type=_lowercase, help='Linear warmup over warmup_steps.' )
    parser.add_argument('--lr_schedule', type=_lowercase, default='warmup_linear' )
    parser.add_argument('--weight_decay', type=_lowercase, default=0.0_1 )
    parser.add_argument('--lm_coef', type=_lowercase, default=0.9 )
    parser.add_argument('--n_valid', type=_lowercase, default=374 )
    parser.add_argument('--server_ip', type=_lowercase, default='', help='Can be used for distant debugging.' )
    parser.add_argument('--server_port', type=_lowercase, default='', help='Can be used for distant debugging.' )
    _lowercase : Dict = parser.parse_args()
    print(_lowercase )
    # ---- optional remote debugger attach ----------------------------------
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print('Waiting for debugger attach' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=_lowercase )
        ptvsd.wait_for_attach()
    # ---- reproducibility & device selection -------------------------------
    random.seed(args.seed )
    np.random.seed(args.seed )
    torch.manual_seed(args.seed )
    torch.cuda.manual_seed_all(args.seed )
    _lowercase : int = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
    _lowercase : Union[str, Any] = torch.cuda.device_count()
    logger.info('device: {}, n_gpu {}'.format(_lowercase, _lowercase ) )
    if not args.do_train and not args.do_eval:
        raise ValueError('At least one of `do_train` or `do_eval` must be True.' )
    if not os.path.exists(args.output_dir ):
        os.makedirs(args.output_dir )
    # Load tokenizer and model
    # This loading functions also add new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    _lowercase : Union[str, Any] = ['_start_', '_delimiter_', '_classify_']
    _lowercase : Optional[int] = OpenAIGPTTokenizer.from_pretrained(args.model_name )
    tokenizer.add_tokens(_lowercase )
    _lowercase : Optional[int] = tokenizer.convert_tokens_to_ids(_lowercase )
    _lowercase : Tuple = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
    model.resize_token_embeddings(len(_lowercase ) )
    model.to(_lowercase )
    # Load and encode the datasets
    def tokenize_and_encode(_lowercase ):
        # Recursively tokenize strings, pass ints through, and map over lists.
        if isinstance(_lowercase, _lowercase ):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(_lowercase ) )
        elif isinstance(_lowercase, _lowercase ):
            return obj
        return [tokenize_and_encode(_lowercase ) for o in obj]

    logger.info('Encoding dataset...' )
    _lowercase : Any = load_rocstories_dataset(args.train_dataset )
    _lowercase : List[str] = load_rocstories_dataset(args.eval_dataset )
    _lowercase : Dict = (train_dataset, eval_dataset)
    _lowercase : Optional[int] = tokenize_and_encode(_lowercase )
    # Compute the max input length for the Transformer
    _lowercase : Optional[Any] = model.config.n_positions // 2 - 2
    _lowercase : List[Any] = max(
        len(story[:max_length] ) + max(len(conta[:max_length] ), len(conta[:max_length] ) ) + 3
        for dataset in encoded_datasets
        for story, conta, conta, _ in dataset )
    _lowercase : List[str] = min(_lowercase, model.config.n_positions ) # Max size of input for the pre-trained model
    # Prepare inputs tensors and dataloaders
    _lowercase : Optional[int] = pre_process_datasets(_lowercase, _lowercase, _lowercase, *_lowercase )
    _lowercase , _lowercase : Union[str, Any] = tensor_datasets[0], tensor_datasets[1]
    _lowercase : Any = TensorDataset(*_lowercase )
    _lowercase : Optional[Any] = RandomSampler(_lowercase )
    _lowercase : Union[str, Any] = DataLoader(_lowercase, sampler=_lowercase, batch_size=args.train_batch_size )
    _lowercase : Optional[int] = TensorDataset(*_lowercase )
    _lowercase : List[Any] = SequentialSampler(_lowercase )
    _lowercase : Optional[Any] = DataLoader(_lowercase, sampler=_lowercase, batch_size=args.eval_batch_size )
    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            _lowercase : Tuple = args.max_steps
            _lowercase : List[str] = args.max_steps // (len(_lowercase ) // args.gradient_accumulation_steps) + 1
        else:
            _lowercase : Dict = len(_lowercase ) // args.gradient_accumulation_steps * args.num_train_epochs
        _lowercase : Optional[int] = list(model.named_parameters() )
        # Weight decay is disabled for biases and LayerNorm parameters.
        _lowercase : Any = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        _lowercase : Tuple = [
            {
                'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
                'weight_decay': args.weight_decay,
            },
            {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0},
        ]
        _lowercase : Tuple = AdamW(_lowercase, lr=args.learning_rate, eps=args.adam_epsilon )
        _lowercase : Optional[int] = get_linear_schedule_with_warmup(
            _lowercase, num_warmup_steps=args.warmup_steps, num_training_steps=_lowercase )
    # ---- training loop ----------------------------------------------------
    if args.do_train:
        _lowercase , _lowercase , _lowercase : int = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs ), desc='Epoch' ):
            _lowercase : Optional[Any] = 0
            _lowercase : Union[str, Any] = 0
            _lowercase : Dict = tqdm(_lowercase, desc='Training' )
            for step, batch in enumerate(_lowercase ):
                _lowercase : Dict = tuple(t.to(_lowercase ) for t in batch )
                _lowercase , _lowercase , _lowercase , _lowercase : Dict = batch
                _lowercase : Optional[Any] = model(_lowercase, mc_token_ids=_lowercase, lm_labels=_lowercase, mc_labels=_lowercase )
                # Combined objective: weighted LM loss + multiple-choice loss.
                _lowercase : Any = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                # Exponential moving average of the loss for the progress bar.
                _lowercase : str = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                _lowercase : Dict = 'Training loss: {:.2e} lr: {:.2e}'.format(_lowercase, scheduler.get_lr()[0] )
    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        _lowercase : List[str] = model.module if hasattr(_lowercase, 'module' ) else model # Only save the model itself
        # If we save using the predefined names, we can load using `from_pretrained`
        _lowercase : Optional[int] = os.path.join(args.output_dir, _lowercase )
        _lowercase : List[Any] = os.path.join(args.output_dir, _lowercase )
        torch.save(model_to_save.state_dict(), _lowercase )
        model_to_save.config.to_json_file(_lowercase )
        tokenizer.save_vocabulary(args.output_dir )
        # Load a trained model and vocabulary that you have fine-tuned
        _lowercase : List[str] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
        _lowercase : Dict = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
        model.to(_lowercase )
    # ---- evaluation loop --------------------------------------------------
    if args.do_eval:
        model.eval()
        _lowercase , _lowercase : List[Any] = 0, 0
        _lowercase , _lowercase : List[str] = 0, 0
        for batch in tqdm(_lowercase, desc='Evaluating' ):
            _lowercase : str = tuple(t.to(_lowercase ) for t in batch )
            _lowercase , _lowercase , _lowercase , _lowercase : Any = batch
            with torch.no_grad():
                _lowercase , _lowercase , _lowercase , _lowercase : Tuple = model(
                    _lowercase, mc_token_ids=_lowercase, lm_labels=_lowercase, mc_labels=_lowercase )
            _lowercase : List[str] = mc_logits.detach().cpu().numpy()
            _lowercase : Any = mc_labels.to('cpu' ).numpy()
            _lowercase : int = accuracy(_lowercase, _lowercase )
            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy
            nb_eval_examples += input_ids.size(0 )
            nb_eval_steps += 1
        _lowercase : Tuple = eval_loss / nb_eval_steps
        _lowercase : Optional[int] = eval_accuracy / nb_eval_examples
        _lowercase : Tuple = tr_loss / nb_tr_steps if args.do_train else None
        _lowercase : List[Any] = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}
        _lowercase : Optional[Any] = os.path.join(args.output_dir, 'eval_results.txt' )
        with open(_lowercase, 'w' ) as writer:
            logger.info('***** Eval results *****' )
            for key in sorted(result.keys() ):
                logger.info(' %s = %s', _lowercase, str(result[key] ) )
                writer.write('%s = %s\n' % (key, str(result[key] )) )
if __name__ == "__main__":
    main()
| 4 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class lowerCamelCase__ ( unittest.TestCase ):
    """Config/inputs factory used by the Flax RoFormer model tests.

    NOTE(review): ``__init__`` below reuses a single mangled parameter name
    many times, which is a SyntaxError, and its body assigns every value to a
    mangled local instead of instance attributes — while the other methods
    read them via ``self.``. Restore the original names before use.
    """

    def __init__(self : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str]=13 , UpperCamelCase_ : Union[str, Any]=7 , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Any=True , UpperCamelCase_ : Dict=True , UpperCamelCase_ : List[str]=True , UpperCamelCase_ : int=99 , UpperCamelCase_ : Tuple=32 , UpperCamelCase_ : List[str]=5 , UpperCamelCase_ : Dict=4 , UpperCamelCase_ : Tuple=37 , UpperCamelCase_ : int="gelu" , UpperCamelCase_ : int=0.1 , UpperCamelCase_ : Dict=0.1 , UpperCamelCase_ : Any=512 , UpperCamelCase_ : List[Any]=16 , UpperCamelCase_ : Optional[int]=2 , UpperCamelCase_ : Tuple=0.02 , UpperCamelCase_ : Union[str, Any]=4 , ) -> Tuple:
        """Store the test hyper-parameters (batch/sequence sizes and the
        RoFormer architecture settings) on the tester instance."""
        _lowercase : int = parent
        _lowercase : str = batch_size
        _lowercase : List[str] = seq_length
        _lowercase : Dict = is_training
        _lowercase : Optional[int] = use_attention_mask
        _lowercase : List[Any] = use_token_type_ids
        _lowercase : Union[str, Any] = use_labels
        _lowercase : Dict = vocab_size
        _lowercase : List[Any] = hidden_size
        _lowercase : Any = num_hidden_layers
        _lowercase : int = num_attention_heads
        _lowercase : Optional[int] = intermediate_size
        _lowercase : Any = hidden_act
        _lowercase : List[str] = hidden_dropout_prob
        _lowercase : Union[str, Any] = attention_probs_dropout_prob
        _lowercase : Optional[int] = max_position_embeddings
        _lowercase : int = type_vocab_size
        _lowercase : Any = type_sequence_label_size
        _lowercase : Any = initializer_range
        _lowercase : str = num_choices

    def __UpperCAmelCase ( self : str ) -> int:
        """Build a RoFormerConfig plus random input ids and optional
        attention-mask / token-type-id tensors for one forward pass."""
        _lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _lowercase : int = None
        if self.use_attention_mask:
            _lowercase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
        _lowercase : Any = None
        if self.use_token_type_ids:
            _lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        _lowercase : str = RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask

    def __UpperCAmelCase ( self : List[Any] ) -> int:
        """Repackage prepare_config_and_inputs() output as (config, dict) for
        the common model-tester interface.

        NOTE(review): the annotated tuple assignment below is a SyntaxError
        as written (annotations are not allowed on tuple targets).
        """
        _lowercase : Dict = self.prepare_config_and_inputs()
        _lowercase , _lowercase , _lowercase , _lowercase : Union[str, Any] = config_and_inputs
        _lowercase : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Runs the shared Flax model-tester suite against every RoFormer head.

    Fixes vs. the previous revision: the base class was an undefined name `A`
    (the file imports `FlaxModelTesterMixin`), the two class attributes shared
    one name so the first was lost, and `setUp` bound the tester to a local
    instead of `self.model_tester` (which the mixin reads).
    """

    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            # from_pt=True: the published checkpoint is a PyTorch one.
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    """Integration check of the masked-LM head against reference logits.

    Fixes vs. the previous revision: the class name collided with the two
    classes above it (module-level shadowing) and the method name did not
    start with `test_`, so unittest discovery would never run it.
    """

    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        # Reference values produced by the PyTorch implementation.
        expected_slice = jnp.array(
            [[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 4 | 1 |
# Conditional-import shim for the Versatile Diffusion pipelines: fall back to
# dummy placeholder objects when required backends are missing, so importing
# this package never raises at import time.
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)

try:
    # Versatile Diffusion needs both PyTorch and transformers >= 4.25.0.
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Backends unavailable: expose dummies that raise a helpful error on use.
    from ...utils.dummy_torch_and_transformers_objects import (
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
    )
else:
    # Backends available: import the real implementations.
    from .modeling_text_unet import UNetFlatConditionModel
    from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
    from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
    from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
    from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
# Substring renames applied to every flattened checkpoint key; bound under the
# name the function below actually reads (the previous revision bound this
# dict to a throwaway name, leaving MOE_LAYER_NAME_MAPPING undefined).
MOE_LAYER_NAME_MAPPING = {
    "/attention/": "/0/SelfAttention/",
    "/self_attention/": "/0/SelfAttention/",
    "/encoder_decoder_attention/": "/1/EncDecAttention/",
    "value": "v",
    "query": "q",
    "key": "k",
    "out": "o",
    "pre_self_attention_layer_norm": "0/layer_norm",
    "pre_cross_attention_layer_norm": "1/layer_norm",
    "pre_attention_layer_norm": "0/layer_norm",  # previously 1, but seems wrong
    "token_embedder": "shared",
    "encoder_norm": "final_layer_norm",
    "decoder_norm": "final_layer_norm",
    "relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
    "router/router_weights/w/": "router/classifier/",
    "roer/roer_weights/w/": "router/classifier/",
    "logits_dense": "lm_head",
}


def rename_keys(s_dict):
    """Rename flattened T5X/Switch checkpoint keys in place to the HF layout.

    Fixes vs. the previous revision: every local was collapsed to one name so
    `new_key` was read but never written, and the expert-fanout f-string had
    been replaced by the literal 'nested fstring'.

    Args:
        s_dict: dict mapping "/"-joined parameter paths to weight arrays
            (each value only needs `.shape`, indexing and `.T`).

    Returns:
        The same dict, mutated: keys renamed, relative-attention biases
        transposed, and each stacked experts tensor split into one entry per
        expert.
    """
    # 1. Convert layers_{x} into HF's block/{x}/layer numbering.
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        layer_to_block_of_layer = r"(encoder|decoder)\/"
        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            # The MLP sub-layer index differs: 1 in the encoder, 2 in the decoder
            # (the decoder has the cross-attention sub-layer in between).
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    # Relative-attention biases are stored transposed relative to HF.
    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer: split the stacked tensor
    # (num_experts first axis) into one key per expert.
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")
            s_dict.pop(key)

    return s_dict
# Map from gin-config variable names to SwitchTransformersConfig fields; bound
# under the name convert_gin_to_config reads (the previous revision bound it
# to a throwaway name, leaving GIN_TO_CONFIG_MAPPING undefined).
GIN_TO_CONFIG_MAPPING = {
    "NUM_ENCODER_LAYERS": "num_layers",
    "NUM_DECODER_LAYERS": "num_decoder_layers",
    "NUM_HEADS": "num_heads",
    "HEAD_DIM": "d_kv",
    "EMBED_DIM": "d_model",
    "MLP_DIM": "d_ff",
    "NUM_SELECTED_EXPERTS": "num_selected_experts",
    "NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
    "NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
    "dense.MlpBlock.activations": "feed_forward_proj",
}


def convert_gin_to_config(gin_file, num_experts):
    """Parse a T5X gin config file into a SwitchTransformersConfig.

    Fixes vs. the previous revision: both parameters shared one name (a
    SyntaxError) and every local was collapsed to one name, so the parsed
    values never reached the config.  The function-local `import regex as re`
    was dropped in favour of the module-level stdlib `re` import — the two
    patterns used here are plain Python regexes.

    Args:
        gin_file: path to the gin config text file.
        num_experts: number of experts to record in the config.

    Returns:
        A SwitchTransformersConfig populated from the gin file.
    """
    with open(gin_file, "r") as f:
        raw_gin = f.read()

    # Lines of the form `NAME = 123` or `NAME = 1.5`.
    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    # The activation is stored as a one-element tuple literal, e.g. ('gelu',).
    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    """Convert a T5X Switch Transformers checkpoint to a PyTorch checkpoint on disk.

    Fixes vs. the previous revision: all five parameters shared one name (a
    SyntaxError) and every local was collapsed to one name.  The restored
    function name is the one the __main__ block of this script already calls.

    Args:
        flax_checkpoint_path: path to the T5X checkpoint directory.
        config_file: pretrained config name/path, used when no gin file is given.
        gin_file: optional gin config; takes precedence over `config_file`.
        pytorch_dump_path: output directory for the converted model.
        num_experts: number of experts, forwarded to the gin conversion.
    """
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    # NOTE(review): attribute name follows this file's `from tax import checkpoints`
    # import; upstream calls this `load_t5x_checkpoint` — confirm against the module.
    flax_params = checkpoints.load_tax_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    # Fixes vs. the previous revision: the parser and parsed namespace were
    # bound to a throwaway name (NameError on `parser`/`args`), and the final
    # call read `args.switch_tax_checkpoint_path`, which does not match the
    # argparse dest derived from `--switch_t5x_checkpoint_path`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
            " model architecture. If not provided, a `gin_file` has to be provided."
        ),
    )
    parser.add_argument(
        "--gin_file",
        default=None,
        type=str,
        required=False,
        help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
    )
    parser.add_argument(
        "--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
    )
    parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
    args = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
| 283 | 0 |
"""simple docstring"""
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
a__ : List[Any] = get_tests_dir('''fixtures/test_sentencepiece_with_bytefallback.model''')
@require_sentencepiece
@require_tokenizers
class GPTSwaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for GPT-SW3, driven by the shared TokenizerTesterMixin.

    Fixes vs. the previous revision: the base class was the undefined name
    `UpperCamelCase` (the file imports `TokenizerTesterMixin`), all four class
    attributes shared one name, and every method was named `UpperCAmelCase_`
    so only the last survived and none were discovered by unittest.
    """

    tokenizer_class = GPTSwaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing; `a__` is the module-level
        # path to the test model defined at the top of this file.
        tokenizer = GPTSwaTokenizer(a__, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)

    def test_full_tokenizer(self):
        tokenizer = GPTSwaTokenizer(a__)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."]
        )
        # fmt: on

    def test_fast_encode_decode(self):
        tokenizer = GPTSwaTokenizer(a__)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)

    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
            "Hey there, how are you doing this fine day?",
            "This is a text with a trailing spaces followed by a dot .",
            "Häj sväjs lillebrör! =)",
            "Det är inget fel på Mr. Cool",
        ]
        # fmt: off
        expected_encoding = {"input_ids": [[6_3_4_2_3, 5, 6_8_1_1, 1_4_9_5_4, 2_8_2, 8_1_6, 3_8_2_1, 6_3_4_6_6, 6_3_4_2_5, 6_3_4_6_2, 1_8, 6_3_9_7_8, 6_7_8, 3_0_1, 1_3_2_0, 6_3_4_2_3, 6_3_4_5_5, 6_3_4_5_8, 1_8, 6_3_9_8_2, 4_2_4_6, 3_9_4_0, 1_9_0_1, 4_7_7_8_9, 5_5_4_7, 1_8_9_9_4], [1_9_6_3_0, 1_1_0_0, 6_3_4_4_6, 1_3_4_2, 6_3_3, 5_4_4, 4_4_8_8, 5_9_3, 5_1_0_2, 2_4_1_6, 6_3_4_9_5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_6_5_2, 4_2_8, 2_6_8, 1_9_3_6, 5_1_5, 2_6_8, 5_8_5_9_3, 2_2_4_1_3, 9_1_0_6, 5_4_6, 2_6_8, 3_3_2_1_3, 6_3_9_7_9, 6_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5_1_3_0, 6_3_4_5_0, 9_2_4, 6_3_4_4_9, 2_2_4_9, 4_0_6_2, 1_5_5_8, 3_1_8, 6_3_5_0_4, 2_1_4_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_0_9, 3_7_7, 2_8_2_7, 2_5_5_9, 3_3_2, 6_5_7_5, 6_3_4_4_3, 2_6_8_0_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="AI-Sweden/gpt-sw3-126m",
            sequences=sequences,
        )
| 712 |
"""simple docstring"""
import functools
def UpperCAmelCase__(days: list[int], costs: list[int]) -> int:
    """Return the minimum cost of travel passes covering all travel days.

    Classic "minimum cost for tickets" dynamic program: `costs` holds the
    prices of a 1-day, 7-day and 30-day pass, and `days` lists the calendar
    days (1..365) on which travel happens.

    Fixes vs. the previous revision: both parameters shared one name (a
    SyntaxError) and every local was collapsed to one name.

    Raises:
        ValueError: if `days` is not a list of ints, `costs` is not a list of
            exactly three ints, or any day falls outside 1..365.

    >>> UpperCAmelCase__([1, 4, 6, 7, 8, 20], [2, 7, 15])
    11
    >>> UpperCAmelCase__([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 30, 31], [2, 7, 15])
    17
    >>> UpperCAmelCase__([], [2, 7, 15])
    0
    """
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        # Minimum cost to cover all travel days from `index` to the year's end.
        if index > 365:
            return 0

        if index not in days_set:
            # No travel on this day: move on for free.
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
    import doctest

    # Run the doctest examples embedded in this module's docstrings.
    doctest.testmod()
| 553 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : str , lowerCAmelCase__ : int ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = num_of_nodes
__SCREAMING_SNAKE_CASE : str = []
__SCREAMING_SNAKE_CASE : List[Any] = {}
def UpperCamelCase__ ( self : Dict , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int ):
"""simple docstring"""
self.m_edges.append([u_node, v_node, weight] )
def UpperCamelCase__ ( self : List[Any] , lowerCAmelCase__ : int ):
"""simple docstring"""
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def UpperCamelCase__ ( self : Dict , lowerCAmelCase__ : int ):
"""simple docstring"""
if self.m_component[u_node] != u_node:
for k in self.m_component:
__SCREAMING_SNAKE_CASE : List[str] = self.find_component(UpperCAmelCase_ )
def UpperCamelCase__ ( self : Union[str, Any] , lowerCAmelCase__ : list[int] , lowerCAmelCase__ : int , lowerCAmelCase__ : int ):
"""simple docstring"""
if component_size[u_node] <= component_size[v_node]:
__SCREAMING_SNAKE_CASE : Any = v_node
component_size[v_node] += component_size[u_node]
self.set_component(UpperCAmelCase_ )
elif component_size[u_node] >= component_size[v_node]:
__SCREAMING_SNAKE_CASE : Any = self.find_component(UpperCAmelCase_ )
component_size[u_node] += component_size[v_node]
self.set_component(UpperCAmelCase_ )
def UpperCamelCase__ ( self : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = []
__SCREAMING_SNAKE_CASE : Optional[int] = 0
__SCREAMING_SNAKE_CASE : Optional[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
__SCREAMING_SNAKE_CASE : Optional[Any] = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = edge
__SCREAMING_SNAKE_CASE : Optional[int] = self.m_component[u]
__SCREAMING_SNAKE_CASE : int = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
__SCREAMING_SNAKE_CASE : List[Any] = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = edge
__SCREAMING_SNAKE_CASE : Dict = self.m_component[u]
__SCREAMING_SNAKE_CASE : int = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
print(F"Added edge [{u} - {v}]\nAdded weight: {w}\n" )
num_of_components -= 1
__SCREAMING_SNAKE_CASE : List[Any] = [-1] * self.m_num_of_nodes
print(F"The total weight of the minimal spanning tree is: {mst_weight}" )
def lowerCAmelCase_():
    """No-op placeholder entry point; this module is exercised via doctests."""
    pass


if __name__ == "__main__":
    import doctest

    # Run any doctest examples defined in this module.
    doctest.testmod()
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
# Module-level names below are the ones the tokenizer class in this file reads
# (the previous revision bound them all to a throwaway name, leaving
# TaTokenizer, logger, VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP and
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES undefined).
if is_sentencepiece_available():
    from .tokenization_ta import TaTokenizer
else:
    # SentencePiece unavailable: no slow-tokenizer class can be provided.
    TaTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
    },
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}
class lowercase__(PreTrainedTokenizerFast):
    """Fast T5 tokenizer backed by the `tokenizers` library.

    Fixes vs. the previous revision: the base class was the undefined name
    `_UpperCAmelCase` (the file imports `PreTrainedTokenizerFast`), all class
    attributes were named `A__` and all methods `A_` (so each shadowed the
    previous one), the class referred to itself via the undefined name
    `TaTokenizerFast`, and the `self.*` assignment targets in `__init__`
    had been replaced by throwaway locals.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = TaTokenizer

    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id_" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        # Saving a slow vocabulary needs the original SentencePiece file.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        """Keep the (deprecated) 512 max length for the canonical t5-* checkpoints, warning about the v5 change."""
        if pretrained_model_name_or_path in lowercase__.max_model_input_sizes:
            deprecated_max_model_length = lowercase__.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the SentencePiece vocab file into `save_directory` and return its path."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None
    ) -> List[int]:
        """Append EOS to each sequence and join them (T5 has no BOS/SEP tokens)."""
        token_ids_a = token_ids_a + [self.eos_token_id]
        if token_ids_a_a is None:
            return self.prefix_tokens + token_ids_a
        else:
            token_ids_a_a = token_ids_a_a + [self.eos_token_id]
            return self.prefix_tokens + token_ids_a + token_ids_a_a

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None
    ) -> List[int]:
        """Return an all-zeros mask (T5 does not use token type ids)."""
        eos = [self.eos_token_id]

        if token_ids_a_a is None:
            return len(token_ids_a + eos) * [0]
        return len(token_ids_a + eos + token_ids_a_a + eos) * [0]

    def get_sentinel_tokens(self):
        # NOTE(review): `bool(...) is not None` is always True, so this filter
        # keeps every additional special token; reproduced as-is from the
        # original behavior — confirm before tightening the predicate.
        return list(
            set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
| 472 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    """Holds the Donut image-processor test configuration.

    Fixes vs. the previous revision: every `__init__` parameter shared one
    name (a SyntaxError) and the assignment targets were throwaway locals.
    The restored class and method names are the ones the test class below
    already calls (`DonutImageProcessingTester(self)` and
    `prepare_image_processor_dict()`).
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        # Default target size deliberately non-square to catch (w, h) swaps.
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate DonutImageProcessor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests DonutImageProcessor over PIL, NumPy and PyTorch inputs.

    Fixes vs. the previous revision: the base class was the undefined name
    `UpperCAmelCase__` (the file imports `ImageProcessingSavingTestMixin`),
    every method was named `lowerCamelCase_` so only the last survived, and
    `setUp` bound the tester to a local instead of
    `self.image_processor_tester` (read via `self.image_processor_dict`).
    """

    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass

    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
lowerCamelCase_ : Union[str, Any] = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
lowerCamelCase_ : Optional[int] = [0, 25, 50]
lowerCamelCase_ : Union[str, Any] = [25, 50, 75]
lowerCamelCase_ : List[Any] = fuzz.membership.trimf(X, abca)
lowerCamelCase_ : Optional[Any] = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
lowerCamelCase_ : Optional[int] = np.ones(75)
lowerCamelCase_ : Optional[int] = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
lowerCamelCase_ : Dict = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
lowerCamelCase_ : Union[str, Any] = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1- min(µA(x))
lowerCamelCase_ : List[Any] = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
lowerCamelCase_ : Tuple = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
lowerCamelCase_ : Any = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
lowerCamelCase_ : Tuple = young * middle_aged
# 7. Bounded Sum = min[1,(µA(x), µB(x))]
lowerCamelCase_ : List[Any] = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = min[0,(µA(x), µB(x))]
lowerCamelCase_ : List[Any] = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show() | 302 | 0 |
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(
    remaining_length: int, remainder: int, digits: list[int], length: int
) -> int:
    """Count reversible numbers of the given ``length`` (Project Euler 145).

    A number n is reversible when n + reverse(n) has only odd digits. The
    search fills ``digits`` from the outside in: at each step one digit pair
    (position i and length-i-1) is chosen so the running carry ``remainder``
    keeps every digit of the sum odd.

    NOTE(review): the source had all four parameters obfuscated to one name
    (a SyntaxError) and the digit-slot assignments orphaned; names and the
    ``digits[...]`` index targets are restored from the recursive call sites.
    """
    if remaining_length == 0:
        # Leading zeros are not allowed in n nor in reverse(n).
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        # Verify every digit of n + reverse(n) is odd, propagating carries inward.
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1

    if remaining_length == 1:
        # Middle digit of an odd-length number: it is added to itself (2*digit),
        # so the incoming carry must already make this position odd.
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length
            )
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        # The paired digits must have opposite parity so their sum is odd.
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length
            )
    return result


def solution(max_power: int = 9) -> int:
    """Return the count of reversible numbers below 10**max_power."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"""{solution() = }""")
| 43 |
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    """Builds a small TimesformerConfig plus random inputs and runs shape checks.

    NOTE(review): the source had all ``__init__`` parameters obfuscated to one
    name (a SyntaxError), every method sharing one name (mutual shadowing), and
    orphaned locals; identifiers are restored from the use sites (e.g. the
    class name from ``TimesformerModelTester(self)`` in the test class).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_labels=10,
        initializer_range=0.02,
        attention_type="divided_space_time",
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) with random test data."""
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        """Build a TimesformerConfig mirroring this tester's hyperparameters."""
        config = TimesformerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        """Forward the base model and verify the hidden-state shape."""
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        """Forward the classification head and verify the logits shape."""
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the format the common tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common unit tests for the Timesformer model family.

    NOTE(review): the source had undefined placeholder names at every call site
    and all methods sharing one name; identifiers are restored from grounded
    uses (``self.model_tester``, ``self._prepare_for_class``,
    ``self.all_model_classes``) and unittest naming requirements. The base
    classes are restored from the file's imports (ModelTesterMixin,
    PipelineTesterMixin).
    """

    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': TimesformerModel, 'video-classification': TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Copy the inputs and, when requested, attach dummy labels for the head."""
        inputs_dict = copy.deepcopy(inputs_dict)

        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="""TimeSformer does not use inputs_embeds""")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
def prepare_video():
    """Download the sample "eating spaghetti" clip and return it as a list of frames.

    The source referenced an undefined name (``UpperCamelCase``) for the downloaded
    path; the local names are restored so the download result is actually loaded.
    The function name is restored from its call site in the integration test.
    """
    file = hf_hub_download(
        repo_id="""hf-internal-testing/spaghetti-video""", filename="""eating_spaghetti.npy""", repo_type="""dataset"""
    )
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the released K400-finetuned checkpoint.

    NOTE(review): orphaned placeholder locals are restored from the use sites;
    ``default_image_processor`` is grounded by its use below.
    """

    @cached_property
    def default_image_processor(self):
        # logits were tested with a resolution of 224x224 pre-processing
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="""pt""").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
| 160 | 0 |
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order low-pass biquad (Audio EQ Cookbook coefficients).

    NOTE(review): all seven factory functions in this file shared one obfuscated
    name (shadowing each other) and their coefficient locals were orphaned; the
    names are restored from the standard biquad formulas, which the surviving
    expressions match exactly.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])  # b2 == b0 for a low-pass
    return filt
def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order high-pass biquad (Audio EQ Cookbook coefficients)."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])  # b2 == b0 for a high-pass
    return filt
def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order band-pass biquad (constant skirt gain, peak gain = Q)."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order all-pass biquad: flat magnitude, frequency-dependent phase."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    # For an all-pass the numerator is the mirrored denominator.
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt
def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order peaking EQ biquad boosting/cutting by ``gain_db``."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)  # amplitude from decibel gain

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a

    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order low-shelf biquad boosting/cutting lows by ``gain_db``."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)  # amplitude from decibel gain

    # Shared sub-expressions of the shelf formulas (p/m = plus/minus).
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)

    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order high-shelf biquad boosting/cutting highs by ``gain_db``."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)  # amplitude from decibel gain

    # Shared sub-expressions of the shelf formulas (p/m = plus/minus).
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)

    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class __UpperCAmelCase(PipelineTool):
    """Zero-shot text-classification tool built on an NLI model (bart-large-mnli).

    NOTE(review): the base class referenced an undefined name; it is restored to
    ``PipelineTool`` per the file's import. Class-attribute, method and local
    names are restored from the PipelineTool contract and the use sites
    (``self.entailment_id``, ``self._labels``).
    """

    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        """Load the model/tokenizer, then locate the NLI "entailment" class index."""
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith('''entail'''):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError('''Could not determine the entailment ID from the model config, please pass it at init.''')

    def encode(self, text, labels):
        """Pair the text with one NLI hypothesis per candidate label."""
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels), [F"""This example is {label}""" for label in labels], return_tensors='''pt''', padding='''max_length''', )

    def decode(self, outputs):
        """Return the label whose hypothesis scored highest on entailment."""
        logits = outputs.logits
        # NOTE(review): the entailment column is hard-coded to 2 here even though
        # setup() computes self.entailment_id; correct for bart-large-mnli, but
        # using self.entailment_id looks like the intent -- confirm upstream.
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# SentencePiece word-boundary marker.
SPIECE_UNDERLINE = '▁'

# NOTE(review): these module constants were all assigned to one obfuscated name,
# leaving the names the tokenizer class references undefined; they are restored
# from the class-body use sites (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, logger).
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/pegasus-xsum': 512,
}

logger = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__(PreTrainedTokenizer):
    """PEGASUS tokenizer backed by SentencePiece.

    Ids 0..offset-1 are reserved special tokens (pad, eos, mask tokens and
    pretraining-only ``<unk_x>`` placeholders); SentencePiece ids are shifted
    up by ``offset``.

    NOTE(review): the base class referenced an undefined name (restored to
    ``PreTrainedTokenizer`` per the file's import); duplicated/colliding class
    attributes and method names, and orphaned attribute assignments, are
    restored from internal use sites (``self._special_token_mask``,
    ``self.vocab_size``, ``self.offset``, ...).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        sp_model_kwargs=None,
        **kwargs,
    ):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    F"additional_special_tokens should be of type {type(list)}, but is"
                    F" {type(additional_special_tokens)}")

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                F"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    F" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.")
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [F"<unk_{i}>" for i in range(2, self.offset)]

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token, unk_token=unk_token, mask_token=mask_token, pad_token=pad_token, mask_token_sent=mask_token_sent, offset=offset, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # add special tokens to encoder dict
        # (naming is inverted vs usual: encoder maps id -> token, decoder token -> id)
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }

        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                })

        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})

        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}

    @property
    def vocab_size(self) -> int:
        """SentencePiece vocabulary size plus the reserved special-token offset."""
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload on setstate.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index):
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def num_special_tokens_to_add(self, pair=False):
        # Only the trailing EOS is ever added.
        return 1

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(self, token_ids_a, token_ids_a1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_a)
        elif token_ids_a1 is None:
            return self._special_token_mask(token_ids_a) + [1]
        else:
            return self._special_token_mask(token_ids_a + token_ids_a1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a1=None):
        if token_ids_a1 is None:
            return token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_a + token_ids_a1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 421 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    # Abstract base for CLI sub-commands: subclasses register an argparse
    # sub-parser and implement the command body.
    # NOTE(review): both abstract methods share the same mangled name, so the
    # second definition shadows the first — confirm the intended names
    # (presumably register_subcommand / run).
    @staticmethod
    @abstractmethod
    def __lowerCamelCase ( lowercase__ ):
        """Register this command's sub-parser on the given parser (presumably
        an ArgumentParser — confirm against callers)."""
        raise NotImplementedError()
    @abstractmethod
    def __lowerCamelCase ( self ):
        """Execute the command."""
        raise NotImplementedError()
| 421 | 1 |
'''simple docstring'''
from itertools import product
def lowercase_ ( sides_number , dice_number ) ->list[int]:
    """Return, for *dice_number* dice with *sides_number* faces each, a list
    ``freq`` where ``freq[t]`` counts the ways the dice sum to ``t``.

    Fixes: the original declared two parameters with the same name (a
    SyntaxError) and assigned every local to one throwaway name while
    reading the real names (NameError); both are repaired.
    """
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    faces_numbers = range(min_face_number , max_face_number + 1 )
    # enumerate every ordered roll and tally its total
    for dice_numbers in product(faces_numbers , repeat=dice_number ):
        total = sum(dice_numbers )
        totals_frequencies[total] += 1
    return totals_frequencies
def lowercase_ ( ) ->float:
    """Project Euler 205: probability that Peter (nine 4-sided dice) beats
    Colin (six 6-sided dice), rounded to seven decimal places."""
    # NOTE(review): this calls `total_frequency_distribution`, but the helper
    # above is defined under the name `lowercase_` (which this very function
    # shadows) — NameError at runtime; confirm the intended names.
    # NOTE(review): every local below is assigned to the throwaway name
    # `_snake_case` but read under its real name (peter_totals_frequencies,
    # peter_wins_count, ...) — further NameErrors to repair.
    _snake_case: List[str] = total_frequency_distribution(
        sides_number=4 , dice_number=9 )
    _snake_case: List[str] = total_frequency_distribution(
        sides_number=6 , dice_number=6 )
    _snake_case: Tuple = 0
    _snake_case: List[Any] = 9
    _snake_case: List[str] = 4 * 9
    _snake_case: Optional[Any] = 6
    # Peter wins when his total strictly exceeds Colin's
    for peter_total in range(lowercase__ , max_peter_total + 1 ):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total] )
    _snake_case: List[Any] = (4**9) * (6**6)
    _snake_case: str = peter_wins_count / total_games_number
    _snake_case: Tuple = round(lowercase__ , ndigits=7 )
    return rounded_peter_win_probability
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined under that name in this module
    # (the function above is named `lowercase_`) — NameError when executed.
    print(F'{solution() = }')
| 273 |
'''simple docstring'''
from __future__ import annotations
A : Union[str, Any] = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
A : Optional[int] = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def lowercase_ ( lowercase__ ) ->list[float]:
    """O(n^2) next-greater-element: for each item return the first strictly
    larger value to its right, or -1 when none exists.

    Fixes: the original appended through the undefined name ``result`` and
    appended the whole input instead of the found element; locals repaired.
    """
    result = []
    arr_size = len(lowercase__ )
    for i in range(arr_size ):
        next_element: float = -1
        for j in range(i + 1 , arr_size ):
            if lowercase__[i] < lowercase__[j]:
                next_element = lowercase__[j]
                break
        result.append(next_element )
    return result
def lowercase_ ( lowercase__ ) ->list[float]:
    """Next-greater-element via enumerate + slicing: same contract as the
    index-based version (first larger value to the right, else -1).

    Fixes: the original appended through the undefined name ``result`` and
    never recorded the inner element it found; locals repaired.
    """
    result = []
    for i, outer in enumerate(lowercase__ ):
        next_element: float = -1
        for inner in lowercase__[i + 1 :]:
            if outer < inner:
                next_element = inner
                break
        result.append(next_element )
    return result
def lowercase_ ( lowercase__ ) ->list[float]:
    """O(n) stack-based next-greater-element.

    Walks the array right-to-left keeping a stack of candidates; the top of
    the stack after popping smaller-or-equal values is the next greater
    element for the current index.

    Fixes: the original assigned stack/result into throwaway locals, never
    wrote into ``result`` and returned an undefined name; repaired.
    """
    arr_size = len(lowercase__ )
    stack: list[float] = []
    result: list[float] = [-1] * arr_size
    for index in reversed(range(arr_size ) ):
        if stack:
            while stack[-1] <= lowercase__[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(lowercase__[index] )
    return result
if __name__ == "__main__":
    # NOTE(review): the three implementations above are all defined as
    # `lowercase_`, so the descriptive names called below do not exist, and
    # `setup` is assigned to the name `A` — NameErrors when executed;
    # confirm the intended names.
    from doctest import testmod
    from timeit import timeit
    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))
    A : Union[str, Any] = (
        'from __main__ import arr, next_greatest_element_slow, '
        'next_greatest_element_fast, next_greatest_element'
    )
    print(
        'next_greatest_element_slow():',
        timeit('next_greatest_element_slow(arr)', setup=setup),
    )
    print(
        'next_greatest_element_fast():',
        timeit('next_greatest_element_fast(arr)', setup=setup),
    )
    print(
        ' next_greatest_element():',
        timeit('next_greatest_element(arr)', setup=setup),
    )
| 273 | 1 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__A = logging.getLogger(__name__)
def lowercase__ ( preds , labels ) -> float:
    """Fraction of positions where *preds* equals *labels* (numpy arrays).

    Fix: the original declared two parameters with the same name ``A_`` —
    a SyntaxError; they are now distinct.
    """
    return (preds == labels).mean()
@dataclass
class _A :
    """Arguments pertaining to which model/config/tokenizer we fine-tune from.

    Fixes: all four fields were declared under one mangled name (later
    declarations shadow earlier ones, leaving a single field) and optional
    defaults were mangled; names are restored to match the help texts and
    the ``model_args.*`` usages in main(), with ``None`` defaults.
    """
    # required: path or hub id of the pretrained model
    model_name_or_path : str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    config_name : Optional[str] = field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name : Optional[str] = field(
        default=None , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir : Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class _A :
    """Arguments pertaining to the data used for training and evaluation.

    Fixes: field names were mangled to one shared name (shadowing) and the
    boolean default was mangled; names restored to match the help texts and
    the ``data_args.*`` usages in main(), with ``False`` as the cache flag
    default.
    """
    task_name : str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys() )} )
    data_dir : str = field(metadata={'help': 'Should contain the data files for the task.'} )
    max_seq_length : int = field(
        default=128 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    overwrite_cache : bool = field(
        default=False , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def lowercase__ ( ) -> str:
    """Multiple-choice fine-tuning entry point: parse CLI arguments, set up
    logging and the random seed, load config/tokenizer/model, build the
    train/eval datasets, run training and (optionally) evaluation, and
    return the eval-results dict.

    NOTE(review): throughout this function every result is assigned to the
    throwaway name ``__UpperCAmelCase`` but later read under its real name
    (``parser``, ``model_args``, ``processor``, ``trainer``, ...) — each
    such pair raises NameError at runtime and needs repairing.  Also
    ``simple_accuracy`` and the ``%s`` argument ``A_`` below are not
    defined under those names in this module.
    """
    __UpperCAmelCase =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =parser.parse_args_into_dataclasses()
    # refuse to clobber an existing non-empty output directory
    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
            """ --overwrite_output_dir to overcome.""" )
    # Setup logging
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("""Training/evaluation parameters %s""" , A_ )
    # Set seed
    set_seed(training_args.seed )
    # resolve the task processor and label set
    try:
        __UpperCAmelCase =processors[data_args.task_name]()
        __UpperCAmelCase =processor.get_labels()
        __UpperCAmelCase =len(A_ )
    except KeyError:
        raise ValueError("""Task not found: %s""" % (data_args.task_name) )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    __UpperCAmelCase =AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=A_ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    __UpperCAmelCase =AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    __UpperCAmelCase =AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=A_ , cache_dir=model_args.cache_dir , )
    # Get datasets
    __UpperCAmelCase =(
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=A_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    __UpperCAmelCase =(
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=A_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def compute_metrics(A_: EvalPrediction ) -> Dict:
        # argmax over the choice dimension, then plain accuracy
        __UpperCAmelCase =np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(A_ , p.label_ids )}
    # Data collator
    __UpperCAmelCase =DataCollatorWithPadding(A_ , pad_to_multiple_of=8 ) if training_args.fpaa else None
    # Initialize our Trainer
    __UpperCAmelCase =Trainer(
        model=A_ , args=A_ , train_dataset=A_ , eval_dataset=A_ , compute_metrics=A_ , data_collator=A_ , )
    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    __UpperCAmelCase ={}
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""" )
        __UpperCAmelCase =trainer.evaluate()
        __UpperCAmelCase =os.path.join(training_args.output_dir , """eval_results.txt""" )
        if trainer.is_world_master():
            with open(A_ , """w""" ) as writer:
                logger.info("""***** Eval results *****""" )
                for key, value in result.items():
                    logger.info(""" %s = %s""" , A_ , A_ )
                    writer.write("""%s = %s\n""" % (key, value) )
                results.update(A_ )
    return results
def lowercase__ ( A_: Union[str, Any] ) -> Any:
    """TPU/xla_spawn entry point: ignore the process index and run main()."""
    # NOTE(review): `main` is not defined under that name in this module (the
    # training entry point above is also obfuscated to `lowercase__`) —
    # NameError when executed; confirm the intended names.
    main()
if __name__ == "__main__":
    main()
| 68 |
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class lowerCAmelCase_ ( lowercase ):
    """Value-guided trajectory planner: iteratively denoises candidate
    trajectories with a UNet while nudging each step along the gradient of a
    learned value function, then returns the first action of the
    highest-value trajectory.

    Fixes vs. the original: duplicate ``__init__`` parameter names (a
    SyntaxError), five methods all sharing the mangled name ``__a`` while
    the class itself calls ``normalize``/``de_normalize``/``to_torch``/
    ``reset_xa``/``run_diffusion``, locals assigned to a throwaway name but
    read under real names, and a merged text artifact ("| 45 | 0")
    corrupting the final return statement.
    """

    def __init__( self , value_function: UNetaDModel , unet: UNetaDModel , scheduler: DDPMScheduler , env , ):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        # per-key normalization statistics; non-numeric dataset entries are
        # skipped (best-effort, hence the broad excepts kept from upstream)
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize( self , x_in , key ):
        """Standardize *x_in* using the dataset statistics for *key*."""
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize( self , x_in , key ):
        """Invert :meth:`normalize` for *key*."""
        return x_in * self.stds[key] + self.means[key]

    def to_torch( self , x_in ):
        """Recursively move *x_in* (tensor, dict or array-like) onto the UNet device."""
        if type(x_in ) is dict:
            return {k: self.to_torch(v ) for k, v in x_in.items()}
        elif torch.is_tensor(x_in ):
            return x_in.to(self.unet.device )
        return torch.tensor(x_in , device=self.unet.device )

    def reset_xa( self , x_in , cond , act_dim ):
        """Overwrite the state slice of conditioned timesteps so trajectories
        keep starting from the observed state."""
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in

    def run_diffusion( self , x , conditions , n_guide_steps , scale ):
        """Denoise *x* over the scheduler's timesteps, applying *n_guide_steps*
        value-gradient ascent steps (scaled by *scale*) per timestep.
        Returns the denoised trajectories and the last value estimates."""
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps ):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,) , i , device=self.unet.device , dtype=torch.long )
            for _ in range(n_guide_steps ):
                with torch.enable_grad():
                    x.requires_grad_()
                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0 , 2 , 1 ) , timesteps ).sample
                    grad = torch.autograd.grad([y.sum()] , [x] )[0]
                    posterior_variance = self.scheduler._get_variance(i )
                    model_std = torch.exp(0.5 * posterior_variance )
                    grad = model_std * grad
                # stop guiding on the last noisy steps
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_xa(x , conditions , self.action_dim )
            prev_x = self.unet(x.permute(0 , 2 , 1 ) , timesteps ).sample.permute(0 , 2 , 1 )
            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x , i , x , predict_epsilon=False )["prev_sample"]
            # apply conditions to the trajectory (set the initial state)
            x = self.reset_xa(x , conditions , self.action_dim )
            x = self.to_torch(x )
        return x, y

    def __call__( self , obs , batch_size=64 , planning_horizon=32 , n_guide_steps=2 , scale=0.1 ):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs , "observations" )
        obs = obs[None].repeat(batch_size , axis=0 )
        conditions = {0: self.to_torch(obs )}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)
        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        xa = randn_tensor(shape , device=self.unet.device )
        x = self.reset_xa(xa , conditions , self.action_dim )
        x = self.to_torch(x )
        # run the diffusion process
        x, y = self.run_diffusion(x , conditions , n_guide_steps , scale )
        # sort output trajectories by value
        sorted_idx = y.argsort(0 , descending=True ).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions , key="actions" )
        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0 , batch_size )
        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
from __future__ import annotations
import math
from collections.abc import Callable
def A_ ( fnc: Callable[[int | float], int | float] , x_start: int | float , x_end: int | float , steps: int = 100 , ) -> float:
    """Approximate the arc length of ``y = fnc(x)`` on ``[x_start, x_end]``
    by summing the lengths of *steps* straight-line segments.

    Fixes: the original declared four parameters all named ``__a`` (a
    SyntaxError) and stored every local into a throwaway name while reading
    real names (NameError); both are repaired.
    """
    xa = x_start
    fxa = fnc(x_start )
    length = 0.0
    for _ in range(steps ):
        # Approximates curve as a sequence of linear lines and sums their length
        xb = (x_end - x_start) / steps + xa
        fxb = fnc(xb )
        length += math.hypot(xb - xa , fxb - fxa )
        # Increment step
        xa = xb
        fxa = fxb
    return length
if __name__ == "__main__":
    def A_ ( __a : Any ):
        """Curve under test: f(x) = sin(10x)."""
        return math.sin(10 * x )
    # NOTE(review): the parameter is named `__a` while the body reads `x`,
    # and the f-string below calls `line_length`/`f` and loops over `i` —
    # none of which exist under those names in this module (the integrator
    # above is also named `A_`, which this nested def shadows); NameErrors
    # when executed.
    print("""f(x) = sin(10 * x)""")
    print("""The length of the curve from x = -10 to x = 10 is:""")
    UpperCAmelCase = 10
    while i <= 100_000:
        print(f"""With {i} steps: {line_length(f, -10, 10, i)}""")
        i *= 10
| 351 |
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __snake_case ( unittest.TestCase):
    '''Unit tests for AudioDiffusionPipeline with small dummy models.

    NOTE(review): every method below shares the mangled name `_a`, so later
    definitions shadow earlier ones (only the last survives on the class),
    and locals are assigned to the throwaway name `a__` but read under real
    names (`model`, `pipe`, `image`, ...) — NameErrors to repair.'''
    def _a ( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def _a ( self ):
        # small unconditional UNet fixture
        torch.manual_seed(0 )
        a__ = UNetaDModel(
            sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , )
        return model
    @property
    def _a ( self ):
        # small cross-attention-conditioned UNet fixture
        torch.manual_seed(0 )
        a__ = UNetaDConditionModel(
            sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , cross_attention_dim=10 , )
        return model
    @property
    def _a ( self ):
        # (VAE, UNet) fixture pair for the latent pipeline variant
        torch.manual_seed(0 )
        a__ = AutoencoderKL(
            sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("""DownEncoderBlock2D""", """DownEncoderBlock2D""") , up_block_types=("""UpDecoderBlock2D""", """UpDecoderBlock2D""") , )
        a__ = UNetaDModel(
            sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , )
        return vqvae, unet
    @slow
    def _a ( self ):
        # end-to-end checks: DDPM generation, DDIM audio-to-audio, and
        # encoding-conditioned generation, comparing pixel slices exactly
        a__ = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        a__ = Mel(
            x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
        a__ = DDPMScheduler()
        a__ = AudioDiffusionPipeline(vqvae=a_ , unet=self.dummy_unet , mel=a_ , scheduler=a_ )
        a__ = pipe.to(a_ )
        pipe.set_progress_bar_config(disable=a_ )
        a__ = torch.Generator(device=a_ ).manual_seed(42 )
        a__ = pipe(generator=a_ , steps=4 )
        a__ = output.audios[0]
        a__ = output.images[0]
        a__ = torch.Generator(device=a_ ).manual_seed(42 )
        a__ = pipe(generator=a_ , steps=4 , return_dict=a_ )
        a__ = output[0][0]
        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        a__ = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
        a__ = np.frombuffer(image_from_tuple.tobytes() , dtype="""uint8""" )[:10]
        a__ = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
        a__ = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
        a__ = DDIMScheduler()
        a__ = self.dummy_vqvae_and_unet
        a__ = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=a_ , scheduler=a_ )
        a__ = pipe.to(a_ )
        pipe.set_progress_bar_config(disable=a_ )
        np.random.seed(0 )
        a__ = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
        a__ = torch.Generator(device=a_ ).manual_seed(42 )
        a__ = pipe(raw_audio=a_ , generator=a_ , start_step=5 , steps=10 )
        a__ = output.images[0]
        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        a__ = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
        a__ = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
        a__ = self.dummy_unet_condition
        a__ = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0] , unet=a_ , mel=a_ , scheduler=a_ )
        a__ = pipe.to(a_ )
        pipe.set_progress_bar_config(disable=a_ )
        np.random.seed(0 )
        a__ = torch.rand((1, 1, 10) )
        a__ = pipe(generator=a_ , encoding=a_ )
        a__ = output.images[0]
        a__ = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
        a__ = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase):
    '''Integration test for a pretrained audio-diffusion checkpoint on GPU.

    NOTE(review): both methods share the mangled name `_a` (the tearDown is
    shadowed by the test) and locals are assigned to the throwaway name
    `a__` but read under real names (`pipe`, `output`, ...) — NameErrors to
    repair.'''
    def _a ( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def _a ( self ):
        # generate from the hub checkpoint and compare an exact pixel slice
        a__ = torch_device
        a__ = DiffusionPipeline.from_pretrained("""teticio/audio-diffusion-ddim-256""" )
        a__ = pipe.to(a_ )
        pipe.set_progress_bar_config(disable=a_ )
        a__ = torch.Generator(device=a_ ).manual_seed(42 )
        a__ = pipe(generator=a_ )
        a__ = output.audios[0]
        a__ = output.images[0]
        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        a__ = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
        a__ = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 351 | 1 |
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def lowerCAmelCase__ ( a__ ) ->Dict:
    '''Factory used by argparse ``set_defaults``: build the download command
    from the parsed ``Namespace``.

    Fixes: the original referenced ``DownloadCommand`` and ``args``, neither
    of which exists in this module — the command class below is named
    ``_UpperCAmelCase`` and the parameter is ``a__``.
    '''
    return _UpperCAmelCase(a__.model , a__.cache_dir , a__.force , a__.trust_remote_code )
class _UpperCAmelCase ( lowerCAmelCase ):
    '''CLI command that pre-downloads a model and its tokenizer into the cache.

    Fixes vs. the original: duplicate ``__init__`` parameter names (a
    SyntaxError), locals/attributes assigned to the throwaway name
    ``_UpperCamelCase`` while read under real names, and mangled argparse
    ``type``/``default``/``func`` arguments.  NOTE(review): the two methods
    still share the mangled name ``__UpperCAmelCase`` (the second shadows
    the first) — their intended names (register_subcommand / run) need
    restoring consistently with the base class.
    '''
    @staticmethod
    def __UpperCAmelCase ( lowercase_ : ArgumentParser) -> str:
        """Register the ``download`` sub-parser and its arguments."""
        download_parser = lowercase_.add_parser("download")
        download_parser.add_argument(
            "--cache-dir" , type=str , default=None , help="Path to location to store the models")
        download_parser.add_argument(
            "--force" , action="store_true" , help="Force the model to be download even if already in cache-dir")
        download_parser.add_argument(
            "--trust-remote-code" , action="store_true" , help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine" , )
        download_parser.add_argument("model" , type=str , help="Name of the model to download")
        download_parser.set_defaults(func=lowerCAmelCase__)
    def __init__( self : List[str] , model : str , cache : str , force : bool , trust_remote_code : bool) -> None:
        # stash the parsed options for run()
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code
    def __UpperCAmelCase ( self : List[Any]) -> Any:
        """Download the model weights and tokenizer into the cache directory."""
        from ..models.auto import AutoModel, AutoTokenizer
        AutoModel.from_pretrained(
            self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code)
        AutoTokenizer.from_pretrained(
            self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code)
from __future__ import annotations
def lowerCAmelCase__ ( pattern , text ) ->bool:
    '''Knuth-Morris-Pratt search: report whether *pattern* occurs in *text*.

    Fixes vs. the original: duplicate parameter names (a SyntaxError),
    locals assigned to a throwaway name but read as ``failure``/``i``/``j``
    (NameError), and a merged text artifact on the preceding import line.
    The failure table is built inline because the module helper below shares
    this function's mangled name.
    '''
    if not pattern:
        return True
    # 1) Construct the failure (longest proper prefix-suffix) array
    failure = [0]
    i = 0
    for j in range(1 , len(pattern ) ):
        while i > 0 and pattern[i] != pattern[j]:
            i = failure[i - 1]
        if pattern[i] == pattern[j]:
            i += 1
        failure.append(i )
    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text ):
        if pattern[j] == text[i]:
            if j == (len(pattern ) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False
def lowerCAmelCase__ ( a__ ) ->list[int]:
    '''Return the KMP failure table for pattern *a__*: ``failure[k]`` is the
    length of the longest proper prefix of ``a__[:k+1]`` that is also a
    suffix of it.

    Fixes: locals were assigned to a throwaway name but read as
    ``failure``/``i``/``j``, and the whole pattern was appended to the table
    instead of the running prefix length.
    '''
    failure = [0]
    i = 0
    j = 1
    while j < len(a__ ):
        if a__[i] == a__[j]:
            i += 1
        elif i > 0:
            # fall back to the next shorter border and retry
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i )
    return failure
if __name__ == "__main__":
    # NOTE(review): all assignments below target the same mangled name
    # `lowerCamelCase__`, and the asserts call `kmp`/`get_failure_array` and
    # read `pattern`/`text`/`texta` — none of which exist under those names
    # in this module (both functions above are named `lowerCAmelCase__`);
    # NameErrors when executed.
    # Test 1)
    lowerCamelCase__ = '''abc1abc12'''
    lowerCamelCase__ = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
    lowerCamelCase__ = '''alskfjaldsk23adsfabcabc'''
    assert kmp(pattern, texta) and not kmp(pattern, texta)
    # Test 2)
    lowerCamelCase__ = '''ABABX'''
    lowerCamelCase__ = '''ABABZABABYABABX'''
    assert kmp(pattern, text)
    # Test 3)
    lowerCamelCase__ = '''AAAB'''
    lowerCamelCase__ = '''ABAAAAAB'''
    assert kmp(pattern, text)
    # Test 4)
    lowerCamelCase__ = '''abcdabcy'''
    lowerCamelCase__ = '''abcxabcdabxabcdabcdabcy'''
    assert kmp(pattern, text)
    # Test 5)
    lowerCamelCase__ = '''aabaabaaa'''
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 547 | 1 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class a ( __lowerCAmelCase ):
    """SegFormer model configuration: hierarchical encoder hyper-parameters
    plus decoder/semantic-segmentation settings.

    Fixes vs. the original: the ``__init__`` declared every parameter under
    the single name ``snake_case_`` (a SyntaxError) and assigned each value
    to a throwaway local instead of ``self``; parameter names are restored
    to match the attribute names and the defaults in the original signature.
    """
    __lowerCAmelCase = """segformer"""
    def __init__( self , num_channels=3 , num_encoder_blocks=4 , depths=[2, 2, 2, 2] , sr_ratios=[8, 4, 2, 1] , hidden_sizes=[32, 64, 160, 256] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , num_attention_heads=[1, 2, 5, 8] , mlp_ratios=[4, 4, 4, 4] , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , classifier_dropout_prob=0.1 , initializer_range=0.0_2 , drop_path_rate=0.1 , layer_norm_eps=1e-6 , decoder_hidden_size=256 , semantic_loss_ignore_index=255 , **kwargs , ):
        '''Store the configuration, warning on the deprecated reshape flag.'''
        super().__init__(**kwargs )
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                """Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
                """ removed, as the behaviour will default to that of reshape_last_stage = True.""" , kwargs , )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        # kept for backward compatibility; defaults to True when absent
        self.reshape_last_stage = kwargs.get("""reshape_last_stage""" , True )
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class a ( __lowerCAmelCase ):
    """ONNX export configuration for SegFormer: declares the model inputs,
    the validation tolerance and the default opset.

    Fix vs. the original: a merged text artifact ("| 717 | ...") corrupted
    the final ``return 12`` statement; it is removed.  NOTE(review): all
    three properties share the mangled name ``lowercase_`` (later defs
    shadow earlier ones) — their intended distinct names (inputs,
    atol_for_validation, default_onnx_opset) need restoring.
    """
    __lowerCAmelCase = version.parse("""1.11""" )
    @property
    def lowercase_ ( self ):
        '''Mapping of model input names to their dynamic axes.'''
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ] )
    @property
    def lowercase_ ( self ):
        '''Absolute tolerance used when validating the exported model.'''
        return 1e-4
    @property
    def lowercase_ ( self ):
        '''Default ONNX opset version.'''
        return 12
from __future__ import annotations
SCREAMING_SNAKE_CASE_ = 10  # radix: sort by decimal digits
def UpperCamelCase__ ( _lowercase : list[int] ) -> list[int]:
    """LSD radix-sort *_lowercase* (non-negative ints) in place and return it.

    Fixes vs. the original: the radix was read under the undefined name
    ``RADIX`` (the module constant is ``SCREAMING_SNAKE_CASE_``), ``range()``
    was called on the input list, whole-list values were appended to the
    buckets, and the write-back index never touched the list.  An empty
    input no longer crashes ``max()``.
    """
    if not _lowercase:
        return _lowercase
    placement = 1
    max_digit = max(_lowercase )
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(SCREAMING_SNAKE_CASE_ )]
        # split the list between the buckets by the current digit
        for i in _lowercase:
            tmp = int((i / placement) % SCREAMING_SNAKE_CASE_ )
            buckets[tmp].append(i )
        # put each bucket's contents back into the list
        a = 0
        for b in range(SCREAMING_SNAKE_CASE_ ):
            for i in buckets[b]:
                _lowercase[a] = i
                a += 1
        # move to next digit
        placement *= SCREAMING_SNAKE_CASE_
    return _lowercase
if __name__ == "__main__":
    # run the module's doctests when executed as a script
    # (fix: a merged text artifact "| 466 | 0" trailed testmod() originally)
    import doctest
    doctest.testmod()
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
def a (encoder_config , decoder_config ):
    """Build (old_name, new_name) pairs mapping original TrOCR encoder weight
    keys onto HuggingFace VisionEncoderDecoder names.

    Fixes: the original declared two parameters with the same name (a
    SyntaxError) and appended to the undefined name ``rename_keys``.  The
    second parameter is unused here but kept for the caller's arity.
    """
    rename_keys = []
    for i in range(encoder_config.num_hidden_layers ):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append(
            (F"encoder.deit.blocks.{i}.norm1.weight", F"encoder.encoder.layer.{i}.layernorm_before.weight") )
        rename_keys.append((F"encoder.deit.blocks.{i}.norm1.bias", F"encoder.encoder.layer.{i}.layernorm_before.bias") )
        rename_keys.append(
            (F"encoder.deit.blocks.{i}.attn.proj.weight", F"encoder.encoder.layer.{i}.attention.output.dense.weight") )
        rename_keys.append(
            (F"encoder.deit.blocks.{i}.attn.proj.bias", F"encoder.encoder.layer.{i}.attention.output.dense.bias") )
        rename_keys.append(
            (F"encoder.deit.blocks.{i}.norm2.weight", F"encoder.encoder.layer.{i}.layernorm_after.weight") )
        rename_keys.append((F"encoder.deit.blocks.{i}.norm2.bias", F"encoder.encoder.layer.{i}.layernorm_after.bias") )
        rename_keys.append(
            (F"encoder.deit.blocks.{i}.mlp.fc1.weight", F"encoder.encoder.layer.{i}.intermediate.dense.weight") )
        rename_keys.append(
            (F"encoder.deit.blocks.{i}.mlp.fc1.bias", F"encoder.encoder.layer.{i}.intermediate.dense.bias") )
        rename_keys.append(
            (F"encoder.deit.blocks.{i}.mlp.fc2.weight", F"encoder.encoder.layer.{i}.output.dense.weight") )
        rename_keys.append((F"encoder.deit.blocks.{i}.mlp.fc2.bias", F"encoder.encoder.layer.{i}.output.dense.bias") )
    # cls token, position embeddings and patch embeddings of encoder
    rename_keys.extend(
        [
            ('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''),
            ('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''),
            ('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''),
            ('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''),
            ('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''),
            ('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''),
        ] )
    return rename_keys
def a (state_dict , encoder_config ):
    """Split each fused qkv projection in *state_dict* into separate
    query/key/value weights under HuggingFace names (modifies in place).

    Fixes: the original declared two parameters with the same name (a
    SyntaxError) and assigned the three slices to throwaway locals instead
    of writing them back into ``state_dict`` under the new keys.
    """
    for i in range(encoder_config.num_hidden_layers ):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(F"encoder.deit.blocks.{i}.attn.qkv.weight" )
        state_dict[F"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[F"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[F"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def a (dct , old , new ):
    """Move ``dct[old]`` to ``dct[new]`` (in place).

    Fixes: the original declared three parameters with the same name (a
    SyntaxError) and never reinserted the popped value under the new key.
    """
    val = dct.pop(old )
    dct[new] = val
def a (checkpoint_url ):
    """Download the demo image matching the checkpoint flavour and return it as RGB.

    Handwritten checkpoints get an IAM sample image, printed/stage1 checkpoints a
    SROIE receipt. Requires network access (``requests``) and PIL's ``Image``.
    """
    if "handwritten" in checkpoint_url:
        url = '''https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg''' # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = '''https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'''
    else:
        raise ValueError('''Checkpoint URL should contain either 'handwritten', 'printed' or 'stage1'.''' )
    # stream=True avoids loading the whole response into memory before PIL reads it
    im = Image.open(requests.get(url , stream=True ).raw ).convert('''RGB''' )
    return im
@torch.no_grad()
def a (_lowerCAmelCase , _lowerCAmelCase ):
    """Convert a fairseq TrOCR checkpoint (URL) into a HF VisionEncoderDecoderModel and save it.

    NOTE(review): obfuscation damage — both parameters share one name (SyntaxError) and
    every local is assigned to ``SCREAMING_SNAKE_CASE_``, so names read later
    (``checkpoint_url``, ``state_dict``, ``rename_keys``, ``processor``, ``logits`` ...)
    are unresolved. The comments below describe the apparent original intent; confirm
    against the upstream TrOCR conversion script before relying on them.
    """
    # encoder: ViT config; decoder: TrOCR config (presumably bound to distinct names originally)
    SCREAMING_SNAKE_CASE_ = ViTConfig(image_size=3_8_4 , qkv_bias=_lowerCAmelCase )
    SCREAMING_SNAKE_CASE_ = TrOCRConfig()
    # size of the architecture
    if "base" in checkpoint_url:
        SCREAMING_SNAKE_CASE_ = 7_6_8
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        SCREAMING_SNAKE_CASE_ = 1_0_2_4
        SCREAMING_SNAKE_CASE_ = 4_0_9_6
        SCREAMING_SNAKE_CASE_ = 2_4
        SCREAMING_SNAKE_CASE_ = 1_6
        SCREAMING_SNAKE_CASE_ = 1_0_2_4
    else:
        raise ValueError('''Should either find \'base\' or \'large\' in checkpoint URL''' )
    # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        SCREAMING_SNAKE_CASE_ = False
        SCREAMING_SNAKE_CASE_ = '''relu'''
        SCREAMING_SNAKE_CASE_ = 1_0_2_4
        SCREAMING_SNAKE_CASE_ = True
        SCREAMING_SNAKE_CASE_ = False
        SCREAMING_SNAKE_CASE_ = False
    # load HuggingFace model
    SCREAMING_SNAKE_CASE_ = ViTModel(_lowerCAmelCase , add_pooling_layer=_lowerCAmelCase )
    SCREAMING_SNAKE_CASE_ = TrOCRForCausalLM(_lowerCAmelCase )
    SCREAMING_SNAKE_CASE_ = VisionEncoderDecoderModel(encoder=_lowerCAmelCase , decoder=_lowerCAmelCase )
    model.eval()
    # load state_dict of original model, rename some keys
    SCREAMING_SNAKE_CASE_ = torch.hub.load_state_dict_from_url(_lowerCAmelCase , map_location='''cpu''' , check_hash=_lowerCAmelCase )['''model''']
    SCREAMING_SNAKE_CASE_ = create_rename_keys(_lowerCAmelCase , _lowerCAmelCase )
    for src, dest in rename_keys:
        rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
    read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase )
    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]
    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        SCREAMING_SNAKE_CASE_ = state_dict.pop(_lowerCAmelCase )
        if key.startswith('''decoder''' ) and "output_projection" not in key:
            SCREAMING_SNAKE_CASE_ = val
        else:
            SCREAMING_SNAKE_CASE_ = val
    # load state dict
    model.load_state_dict(_lowerCAmelCase )
    # Check outputs on an image
    SCREAMING_SNAKE_CASE_ = ViTImageProcessor(size=encoder_config.image_size )
    SCREAMING_SNAKE_CASE_ = RobertaTokenizer.from_pretrained('''roberta-large''' )
    SCREAMING_SNAKE_CASE_ = TrOCRProcessor(_lowerCAmelCase , _lowerCAmelCase )
    SCREAMING_SNAKE_CASE_ = processor(images=prepare_img(_lowerCAmelCase ) , return_tensors='''pt''' ).pixel_values
    # verify logits
    SCREAMING_SNAKE_CASE_ = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
    SCREAMING_SNAKE_CASE_ = model(pixel_values=_lowerCAmelCase , decoder_input_ids=_lowerCAmelCase )
    SCREAMING_SNAKE_CASE_ = outputs.logits
    SCREAMING_SNAKE_CASE_ = torch.Size([1, 1, 5_0_2_6_5] )
    # per-checkpoint reference logit slices used as a sanity check after conversion
    if "trocr-base-handwritten" in checkpoint_url:
        SCREAMING_SNAKE_CASE_ = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311] )
    elif "trocr-large-handwritten" in checkpoint_url:
        SCREAMING_SNAKE_CASE_ = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170] )
    elif "trocr-base-printed" in checkpoint_url:
        SCREAMING_SNAKE_CASE_ = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210] )
    elif "trocr-large-printed" in checkpoint_url:
        SCREAMING_SNAKE_CASE_ = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535] )
    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :1_0] , _lowerCAmelCase , atol=1e-3 ), "First elements of logits not as expected"
    Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
    print(F"Saving model to {pytorch_dump_folder_path}" )
    model.save_pretrained(_lowerCAmelCase )
    print(F"Saving processor to {pytorch_dump_folder_path}" )
    processor.save_pretrained(_lowerCAmelCase )
# CLI entry point: parse the checkpoint URL / output folder and run the conversion.
# NOTE(review): obfuscation damage — both the parser and the parsed args are bound to
# ``__SCREAMING_SNAKE_CASE``, yet ``parser``/``args`` are referenced below, and
# ``convert_tr_ocr_checkpoint`` is not defined under that name in this file.
if __name__ == "__main__":
    __SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
    parser.add_argument(
        """--checkpoint_url""",
        default="""https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt""",
        type=str,
        help="""URL to the original PyTorch checkpoint (.pth file).""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
    )
    __SCREAMING_SNAKE_CASE =parser.parse_args()
    convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 234 |
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def a ():
    """Simulate the CUDA OOM failure that triggers batch-size back-off in the tests."""
    message = '''CUDA out of memory.'''
    raise RuntimeError(message )
class __magic_name__ ( nn.Module):
'''simple docstring'''
def __init__( self: Optional[Any] ):
super().__init__()
SCREAMING_SNAKE_CASE_ = nn.Linear(3 , 4 )
SCREAMING_SNAKE_CASE_ = nn.BatchNormad(4 )
SCREAMING_SNAKE_CASE_ = nn.Linear(4 , 5 )
def _A ( self: int , _lowerCamelCase: List[Any] ):
return self.lineara(self.batchnorm(self.lineara(_lowerCamelCase ) ) )
class __magic_name__ ( unittest.TestCase):
    """Tests for ``find_executable_batch_size`` / ``release_memory``.

    NOTE(review): obfuscation damage — results are bound to ``SCREAMING_SNAKE_CASE_``
    while later code reads ``batch_sizes``/``batch_size``/``bs``/``arga``/``model``,
    and ``raise_fake_out_of_memory``/``ModelForTest`` are not defined under those
    names in this file. Comments describe apparent intent; verify against upstream.
    """

    def _A ( self: List[Any] ):
        # batch size should back off 128 -> 64 -> 32 -> 16 -> 8 on repeated fake OOMs
        SCREAMING_SNAKE_CASE_ = []

        @find_executable_batch_size(starting_batch_size=1_28 )
        def mock_training_loop_function(_lowerCamelCase: str ):
            nonlocal batch_sizes
            batch_sizes.append(_lowerCamelCase )
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(_lowerCamelCase , [1_28, 64, 32, 16, 8] )

    def _A ( self: int ):
        # same back-off behaviour, but the decorated function also forwards an extra arg
        SCREAMING_SNAKE_CASE_ = []

        @find_executable_batch_size(starting_batch_size=1_28 )
        def mock_training_loop_function(_lowerCamelCase: Optional[Any] , _lowerCamelCase: Optional[int] ):
            nonlocal batch_sizes
            batch_sizes.append(_lowerCamelCase )
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arga

        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = mock_training_loop_function('''hello''' )
        self.assertListEqual(_lowerCamelCase , [1_28, 64, 32, 16, 8] )
        self.assertListEqual([bs, arga] , [8, '''hello'''] )

    def _A ( self: str ):
        # starting at 0 must fail immediately with the "reached zero" error
        @find_executable_batch_size(starting_batch_size=0 )
        def mock_training_loop_function(_lowerCamelCase: Union[str, Any] ):
            pass

        with self.assertRaises(_lowerCamelCase ) as cm:
            mock_training_loop_function()
        self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )

    def _A ( self: int ):
        # always-OOM function must exhaust all batch sizes and reach zero
        @find_executable_batch_size(starting_batch_size=16 )
        def mock_training_loop_function(_lowerCamelCase: str ):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(_lowerCamelCase ) as cm:
            mock_training_loop_function()
        self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )

    def _A ( self: List[Any] ):
        # calling the wrapped function with batch_size supplied by the caller is an error
        @find_executable_batch_size(starting_batch_size=1_28 )
        def mock_training_loop_function(_lowerCamelCase: str , _lowerCamelCase: int , _lowerCamelCase: Tuple ):
            if batch_size != 8:
                raise raise_fake_out_of_memory()

        with self.assertRaises(_lowerCamelCase ) as cm:
            mock_training_loop_function(1_28 , '''hello''' , '''world''' )
        self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] )
        self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] )

    def _A ( self: Dict ):
        # non-OOM exceptions must propagate unchanged
        @find_executable_batch_size(starting_batch_size=16 )
        def mock_training_loop_function(_lowerCamelCase: Dict ):
            raise ValueError('''Oops, we had an error!''' )

        with self.assertRaises(_lowerCamelCase ) as cm:
            mock_training_loop_function()
        self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] )

    @require_cuda
    def _A ( self: str ):
        # release_memory should return allocated CUDA memory to the baseline level
        SCREAMING_SNAKE_CASE_ = torch.cuda.memory_allocated()
        SCREAMING_SNAKE_CASE_ = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated() , _lowerCamelCase )
        SCREAMING_SNAKE_CASE_ = release_memory(_lowerCamelCase )
        self.assertEqual(torch.cuda.memory_allocated() , _lowerCamelCase )
| 234 | 1 |
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
_lowerCamelCase = random.Random()
if is_torch_available():
import torch
def __UpperCAmelCase( lowercase_ , lowercase_=1.0 , lowercase_=None , lowercase_=None ):
if rng is None:
_lowerCamelCase : Any = global_rng
_lowerCamelCase : List[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class __A ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , a__ , a__=7 , a__=400 , a__=2000 , a__=1 , a__=0.0 , a__=1_6000 , a__=True , a__=True , ):
"""simple docstring"""
_lowerCamelCase : str = parent
_lowerCamelCase : Optional[Any] = batch_size
_lowerCamelCase : Union[str, Any] = min_seq_length
_lowerCamelCase : Any = max_seq_length
_lowerCamelCase : str = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_lowerCamelCase : Optional[Any] = feature_size
_lowerCamelCase : Optional[int] = padding_value
_lowerCamelCase : str = sampling_rate
_lowerCamelCase : Tuple = return_attention_mask
_lowerCamelCase : str = do_normalize
def __snake_case ( self):
"""simple docstring"""
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __snake_case ( self , a__=False , a__=False):
"""simple docstring"""
def _flatten(a__):
return list(itertools.chain(*a__))
if equal_length:
_lowerCamelCase : Optional[Any] = floats_list((self.batch_size, self.max_seq_length))
else:
# make sure that inputs increase in size
_lowerCamelCase : Union[str, Any] = [
_flatten(floats_list((x, self.feature_size)))
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff)
]
if numpify:
_lowerCamelCase : Any = [np.asarray(a__) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __A ( lowerCamelCase__ ,unittest.TestCase ):
    """Functional tests for ``ASTFeatureExtractor``.

    NOTE(review): obfuscation damage — results are bound to ``_lowerCamelCase`` while
    later lines read ``speech_inputs``/``feat_extract``/``ds`` etc., the mixin base
    ``lowerCamelCase__`` is undefined here, and most call arguments were collapsed to
    ``a__``. Comments describe the apparent original intent.
    """

    UpperCAmelCase__ = ASTFeatureExtractor

    def __snake_case ( self):
        """Set up the shared tester helper used to build configs and dummy inputs."""
        _lowerCamelCase : Tuple = ASTFeatureExtractionTester(self)

    def __snake_case ( self):
        """Feature extraction should give matching results for list and numpy inputs,
        for single samples, padded batches, and 2-D numpy batches."""
        _lowerCamelCase : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        _lowerCamelCase : List[Any] = [floats_list((1, x))[0] for x in range(800 , 1400 , 200)]
        _lowerCamelCase : Dict = [np.asarray(a__) for speech_input in speech_inputs]

        # Test not batched input
        _lowerCamelCase : str = feat_extract(speech_inputs[0] , return_tensors='''np''').input_values
        _lowerCamelCase : List[Any] = feat_extract(np_speech_inputs[0] , return_tensors='''np''').input_values
        self.assertTrue(np.allclose(a__ , a__ , atol=1e-3))

        # Test batched
        _lowerCamelCase : int = feat_extract(a__ , padding=a__ , return_tensors='''np''').input_values
        _lowerCamelCase : int = feat_extract(a__ , padding=a__ , return_tensors='''np''').input_values
        for enc_seq_a, enc_seq_a in zip(a__ , a__):
            self.assertTrue(np.allclose(a__ , a__ , atol=1e-3))

        # Test 2-D numpy arrays are batched.
        _lowerCamelCase : Dict = [floats_list((1, x))[0] for x in (800, 800, 800)]
        _lowerCamelCase : List[Any] = np.asarray(a__)
        _lowerCamelCase : Any = feat_extract(a__ , return_tensors='''np''').input_values
        _lowerCamelCase : Any = feat_extract(a__ , return_tensors='''np''').input_values
        for enc_seq_a, enc_seq_a in zip(a__ , a__):
            self.assertTrue(np.allclose(a__ , a__ , atol=1e-3))

    @require_torch
    def __snake_case ( self):
        """Padding float64 inputs should yield float32 numpy / torch tensors."""
        import torch

        _lowerCamelCase : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        _lowerCamelCase : List[str] = np.random.rand(100).astype(np.floataa)
        _lowerCamelCase : str = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            _lowerCamelCase : Any = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''')
            self.assertTrue(np_processed.input_values.dtype == np.floataa)
            _lowerCamelCase : Dict = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''')
            self.assertTrue(pt_processed.input_values.dtype == torch.floataa)

    def __snake_case ( self , a__):
        """Load ``num_samples`` decoded audio arrays from the dummy LibriSpeech set."""
        from datasets import load_dataset

        _lowerCamelCase : Tuple = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''')
        # automatic decoding with librispeech
        _lowerCamelCase : Optional[int] = ds.sort('''id''').select(range(a__))[:num_samples]['''audio''']
        return [x["array"] for x in speech_samples]

    @require_torch
    def __snake_case ( self):
        """Integration check against reference values for one LibriSpeech sample."""
        # expected first 30 values of input_values[0, 0]
        _lowerCamelCase : List[Any] = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869])
        # fmt: on
        _lowerCamelCase : List[Any] = self._load_datasamples(1)
        _lowerCamelCase : List[Any] = ASTFeatureExtractor()
        _lowerCamelCase : Union[str, Any] = feature_extractor(a__ , return_tensors='''pt''').input_values
        self.assertEquals(input_values.shape , (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30] , a__ , atol=1e-4))
| 613 |
def __UpperCAmelCase( lowercase_ , lowercase_ , lowercase_ ):
return round(float(moles / volume ) * nfactor )
def __UpperCAmelCase( lowercase_ , lowercase_ , lowercase_ ):
return round(float((moles * 0.0_8_2_1 * temperature) / (volume) ) )
def __UpperCAmelCase( lowercase_ , lowercase_ , lowercase_ ):
return round(float((moles * 0.0_8_2_1 * temperature) / (pressure) ) )
def __UpperCAmelCase( lowercase_ , lowercase_ , lowercase_ ):
return round(float((pressure * volume) / (0.0_8_2_1 * moles) ) )
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 613 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
A__ : Optional[int] = None
# Module-level constants for the fast XLNet tokenizer.
# NOTE(review): obfuscation damage — every constant below is bound to the same name
# ``A__`` (each assignment clobbers the previous one), while the tokenizer class
# reads them via distinct names (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP, ...).
A__ : Optional[Any] = logging.get_logger(__name__)
# expected on-disk filenames for the sentencepiece model and fast tokenizer
A__ : Optional[Any] = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
# hub download URLs per pretrained checkpoint
A__ : List[str] = {
    """vocab_file""": {
        """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
        """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
    },
    """tokenizer_file""": {
        """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json""",
        """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json""",
    },
}
# XLNet has no fixed positional-embedding size limit
A__ : Optional[int] = {
    """xlnet-base-cased""": None,
    """xlnet-large-cased""": None,
}
# sentencepiece word-boundary marker
A__ : Tuple = """▁"""

# Segments (not really needed)
A__ : Optional[Any] = 0
A__ : Dict = 1
A__ : Any = 2
A__ : Dict = 3
A__ : Union[str, Any] = 4
class UpperCAmelCase_ (_UpperCAmelCase ):
    """Fast (tokenizers-backed) XLNet tokenizer.

    NOTE(review): obfuscation damage — the ``__init__`` below declares every
    parameter under the same name ``SCREAMING_SNAKE_CASE_`` (a SyntaxError), and
    post-``super().__init__`` state is bound to the dead local ``__lowerCamelCase``
    while reading names like ``do_lower_case``/``vocab_file`` that only existed in
    the original signature. Comments describe the apparent original contract.
    """

    lowerCamelCase : Optional[int] = VOCAB_FILES_NAMES
    lowerCamelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
    lowerCamelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # XLNet pads on the left
    lowerCamelCase : List[Any] = 'left'
    lowerCamelCase : Optional[Any] = XLNetTokenizer

    def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="<unk>" , SCREAMING_SNAKE_CASE_="<sep>" , SCREAMING_SNAKE_CASE_="<pad>" , SCREAMING_SNAKE_CASE_="<cls>" , SCREAMING_SNAKE_CASE_="<mask>" , SCREAMING_SNAKE_CASE_=["<eop>", "<eod>"] , **SCREAMING_SNAKE_CASE_ , ) -> int:
        # Mask token behave like a normal word, i.e. include the space before it
        __lowerCamelCase : int = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token

        super().__init__(
            vocab_file=SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ , remove_space=SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , additional_special_tokens=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )

        # pad token type id (segment id 3 in XLNet), then remembered ctor options
        __lowerCamelCase : Tuple = 3
        __lowerCamelCase : int = do_lower_case
        __lowerCamelCase : Dict = remove_space
        __lowerCamelCase : str = keep_accents
        __lowerCamelCase : List[Any] = vocab_file
        __lowerCamelCase : Optional[int] = False if not self.vocab_file else True

    def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
        # XLNet format: A + <sep> + <cls>, or A + <sep> + B + <sep> + <cls>
        __lowerCamelCase : Any = [self.sep_token_id]
        __lowerCamelCase : List[Any] = [self.cls_token_id]
        if token_ids_a is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_a + sep + cls

    def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
        # token type ids: 0s for segment A, 1s for segment B, 2 for the trailing <cls>
        __lowerCamelCase : Any = [self.sep_token_id]
        __lowerCamelCase : List[Any] = [2]
        if token_ids_a is None:
            return len(token_ids_a + sep ) * [0] + cls_segment_id
        return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id

    def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
        # copy the slow tokenizer's sentencepiece model next to the fast files
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )

        if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        __lowerCamelCase : Union[str, Any] = os.path.join(
            SCREAMING_SNAKE_CASE_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ):
            copyfile(self.vocab_file , SCREAMING_SNAKE_CASE_ )

        return (out_vocab_file,)
| 13 | import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
__lowerCamelCase : str = get_logger(__name__)
class a__ ( enum.Enum ):
    """How thoroughly downloaded files and dataset splits are verified.

    Fix over the obfuscated original: all three members were named ``A``, which
    makes ``enum`` raise "attempted to reuse key" at class-creation time.
    """

    ALL_CHECKS = 'all_checks'
    BASIC_CHECKS = 'basic_checks'
    NO_CHECKS = 'no_checks'
# Download-verification error placeholders.
# NOTE(review): obfuscation damage — all four classes share the name ``a__`` (each
# redefinition shadows the previous) and the base ``A__`` is undefined here;
# upstream these are distinct exception subclasses raised by the checksum checks.
class a__ ( A__ ):
    pass


class a__ ( A__ ):
    pass


class a__ ( A__ ):
    pass


class a__ ( A__ ):
    pass
def _snake_case ( expected_checksums : Optional[dict] , recorded_checksums : dict , verification_name=None ):
    """Verify recorded download checksums against the expected ones.

    Args:
        expected_checksums: mapping url -> expected checksum info; ``None`` skips
            verification entirely (only logs).
        recorded_checksums: mapping url -> checksum info actually recorded.
        verification_name: optional label included in log / error messages.

    Raises:
        ExpectedMoreDownloadedFiles: some expected urls were not downloaded.
        UnexpectedDownloadedFile: urls were downloaded that were not expected.
        NonMatchingChecksumError: a downloaded file's checksum differs.
    """
    if expected_checksums is None:
        logger.info("Unable to verify checksums." )
        return
    if len(set(expected_checksums ) - set(recorded_checksums ) ) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums ) - set(recorded_checksums ) ) )
    if len(set(recorded_checksums ) - set(expected_checksums ) ) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums ) - set(expected_checksums ) ) )
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls ) > 0:
        raise NonMatchingChecksumError(
            f'Checksums didn\'t match{for_verification_name}:\n'
            f'{bad_urls}\n'
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error" )
    logger.info("All the checksums matched successfully" + for_verification_name )
# Split-verification error placeholders.
# NOTE(review): obfuscation damage — all four classes share the name ``a__`` and the
# base ``A__`` is undefined here; upstream these are distinct exception subclasses
# raised by the split-size checks below.
class a__ ( A__ ):
    pass


class a__ ( A__ ):
    pass


class a__ ( A__ ):
    pass


class a__ ( A__ ):
    pass
def _snake_case ( expected_splits : Optional[dict] , recorded_splits : dict ):
    """Verify recorded dataset split sizes against the expected ones.

    Args:
        expected_splits: mapping split name -> split info with ``num_examples``;
            ``None`` skips verification entirely (only logs).
        recorded_splits: mapping split name -> split info actually recorded.

    Raises:
        ExpectedMoreSplits: expected splits are missing from the recorded set.
        UnexpectedSplits: recorded splits were not expected.
        NonMatchingSplitsSizesError: a split's example count differs.
    """
    if expected_splits is None:
        logger.info("Unable to verify splits sizes." )
        return
    if len(set(expected_splits ) - set(recorded_splits ) ) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits ) - set(recorded_splits ) ) )
    if len(set(recorded_splits ) - set(expected_splits ) ) > 0:
        raise UnexpectedSplits(str(set(recorded_splits ) - set(expected_splits ) ) )
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits ) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits ) )
    logger.info("All the splits matched successfully." )
def _snake_case ( lowerCAmelCase : str , lowerCAmelCase : bool = True ):
"""simple docstring"""
if record_checksum:
SCREAMING_SNAKE_CASE_ : int = shaaaa()
with open(lowerCAmelCase , "rb" ) as f:
for chunk in iter(lambda: f.read(1 << 2_0 ) , B"" ):
m.update(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] = m.hexdigest()
else:
SCREAMING_SNAKE_CASE_ : Optional[Any] = None
return {"num_bytes": os.path.getsize(lowerCAmelCase ), "checksum": checksum}
def _snake_case ( lowerCAmelCase : str ):
"""simple docstring"""
if dataset_size and config.IN_MEMORY_MAX_SIZE:
return dataset_size < config.IN_MEMORY_MAX_SIZE
else:
return False
| 216 | 0 |
"""simple docstring"""
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def a__ ( model ):
    """Return the first MLP linear layer of *model*'s transformer stack.

    GPT-2 names it ``c_fc``; BLOOM-style models name it ``dense_ah_to_h``.
    (Fix: the obfuscated original named the parameter ``_SCREAMING_SNAKE_CASE``
    while the body read the undefined name ``model``.)
    """
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class _lowerCamelCase ( nn.Module ):
def __init__(self , __a , __a ) -> Tuple:
super().__init__()
UpperCamelCase = module
UpperCamelCase = nn.Sequential(
nn.Linear(module.in_features , __a , bias=__a ) , nn.Linear(__a , module.out_features , bias=__a ) , )
UpperCamelCase = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=__a )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def snake_case_ (self , __a , *__a , **__a ) -> int:
return self.module(__a , *__a , **__a ) + self.adapter(__a )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowerCamelCase ( unittest.TestCase ):
    """Base int8/int4 quantization test holding shared constants and the tokenizer.

    NOTE(review): obfuscation damage — the distinct class constants
    (model name, expected relative memory difference, prompt, max new tokens) were
    all collapsed onto ``UpperCAmelCase_``, and ``EXPECTED_OUTPUTS`` below refers to
    a name that no longer exists after that collapse.
    """

    # We keep the constants inside the init function and model loading inside setUp function

    # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
    # Therefore here we use only bloom-1b3 to test our module
    UpperCAmelCase_ = "bigscience/bloom-1b7"

    # Constant values
    UpperCAmelCase_ = 2.109659552692574
    UpperCAmelCase_ = "Hello my name is"
    UpperCAmelCase_ = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
    UpperCAmelCase_ = 10

    def snake_case_ (self ) -> str:
        # Models and tokenizer
        UpperCamelCase = AutoTokenizer.from_pretrained(self.model_name )
class _lowerCamelCase ( _lowercase ):
    """4-bit quantization tests for a causal LM (fp16 reference vs 4-bit model).

    NOTE(review): obfuscation damage throughout — results are bound to the dead
    local ``UpperCamelCase`` while later lines read ``self.model_fpaa`` /
    ``self.model_abit`` / ``config`` / ``encoded_input`` etc., and many call
    arguments were collapsed to ``__a``. Comments describe the apparent intent.
    """

    def snake_case_ (self ) -> Any:
        super().setUp()

        # Models and tokenizer: fp16 reference model and 4-bit quantized model
        UpperCamelCase = AutoModelForCausalLM.from_pretrained(
            self.model_name , torch_dtype=torch.floataa , device_map="auto" )
        UpperCamelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__a , device_map="auto" )

    def snake_case_ (self ) -> str:
        # free GPU memory between tests
        del self.model_fpaa
        del self.model_abit
        gc.collect()
        torch.cuda.empty_cache()

    def snake_case_ (self ) -> Optional[Any]:
        # quantization_config must be attached and serializable
        UpperCamelCase = self.model_abit.config
        self.assertTrue(hasattr(__a , "quantization_config" ) )
        UpperCamelCase = config.to_dict()
        UpperCamelCase = config.to_diff_dict()
        UpperCamelCase = config.to_json_string()

    def snake_case_ (self ) -> Optional[int]:
        # memory footprint ratio should match the expected relative difference
        from bitsandbytes.nn import Paramsabit

        UpperCamelCase = self.model_fpaa.get_memory_footprint()
        UpperCamelCase = self.model_abit.get_memory_footprint()
        self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
        UpperCamelCase = get_some_linear_layer(self.model_abit )
        self.assertTrue(linear.weight.__class__ == Paramsabit )

    def snake_case_ (self ) -> Dict:
        # quantized linear layers store packed uint8 weights
        from transformers import TaPreTrainedModel

        self.model_fpaa.get_memory_footprint()
        self.model_abit.get_memory_footprint()

        for name, module in self.model_abit.named_modules():
            if isinstance(__a , torch.nn.Linear ):
                if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uinta )

    def snake_case_ (self ) -> Tuple:
        # generation with the 4-bit model should produce one of the known outputs
        UpperCamelCase = self.tokenizer(self.input_text , return_tensors="pt" )
        UpperCamelCase = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS )

    def snake_case_ (self ) -> Optional[int]:
        # loading via an explicit BitsAndBytesConfig should behave the same
        UpperCamelCase = BitsAndBytesConfig()
        UpperCamelCase = True
        UpperCamelCase = AutoModelForCausalLM.from_pretrained(
            self.model_name , quantization_config=__a , device_map="auto" )
        UpperCamelCase = self.tokenizer(self.input_text , return_tensors="pt" )
        UpperCamelCase = model_abit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS )

    def snake_case_ (self ) -> Dict:
        # saving a 4-bit model is not supported and must raise
        with self.assertRaises(__a ), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_abit.save_pretrained(__a )

    def snake_case_ (self ) -> int:
        # conflicting quantization arguments must raise
        UpperCamelCase = BitsAndBytesConfig()
        with self.assertRaises(__a ):
            UpperCamelCase = AutoModelForCausalLM.from_pretrained(
                self.model_name , quantization_config=__a , load_in_abit=__a , device_map="auto" , bnb_abit_quant_type="nf4" , )

    def snake_case_ (self ) -> Any:
        # casting / moving a quantized model must raise; fp16 model stays castable
        with self.assertRaises(__a ):
            # Tries with `str`
            self.model_abit.to("cpu" )

        with self.assertRaises(__a ):
            # Tries with a `dtype``
            self.model_abit.to(torch.floataa )

        with self.assertRaises(__a ):
            # Tries with a `device`
            self.model_abit.to(torch.device("cuda:0" ) )

        with self.assertRaises(__a ):
            # Tries with a `device`
            self.model_abit.float()

        with self.assertRaises(__a ):
            # Tries with a `device`
            self.model_abit.half()

        # Test if we did not break anything
        UpperCamelCase = self.tokenizer(self.input_text , return_tensors="pt" )

        UpperCamelCase = self.model_fpaa.to(torch.floataa )
        UpperCamelCase = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )

        # Check this does not throw an error
        UpperCamelCase = self.model_fpaa.to("cpu" )

        # Check this does not throw an error
        UpperCamelCase = self.model_fpaa.half()

        # Check this does not throw an error
        UpperCamelCase = self.model_fpaa.float()

    def snake_case_ (self ) -> str:
        # modules kept in fp32 (T5 DenseReluDense.wo) must not be quantized
        UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained("t5-small" , load_in_abit=__a , device_map="auto" )
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowerCamelCase ( unittest.TestCase ):
    """4-bit quantization tests for T5-family seq2seq models.

    NOTE(review): obfuscation damage — class attributes and per-test results are
    bound to dead locals (``UpperCamelCase``) while later lines read
    ``cls.model_name`` / ``self.tokenizer`` / ``model`` etc., and call arguments
    were collapsed to ``__a``. Comments describe the apparent intent.
    """

    @classmethod
    def snake_case_ (cls ) -> Tuple:
        # shared model names, tokenizer and prompt for all tests in this class
        UpperCamelCase = "t5-small"
        UpperCamelCase = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        UpperCamelCase = AutoTokenizer.from_pretrained(cls.model_name )
        UpperCamelCase = "Translate in German: Hello, my dog is cute"

    def snake_case_ (self ) -> Any:
        # free GPU memory between tests
        gc.collect()
        torch.cuda.empty_cache()

    def snake_case_ (self ) -> Optional[Any]:
        # with _keep_in_fpaa_modules disabled, loading + generation must still work
        from transformers import TaForConditionalGeneration

        UpperCamelCase = TaForConditionalGeneration._keep_in_fpaa_modules
        UpperCamelCase = None

        # test with `t5-small`
        UpperCamelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__a , device_map="auto" )
        UpperCamelCase = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
        UpperCamelCase = model.generate(**__a )

        # test with `flan-t5-small`
        UpperCamelCase = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=__a , device_map="auto" )
        UpperCamelCase = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
        UpperCamelCase = model.generate(**__a )
        # restore the class attribute modified above
        UpperCamelCase = modules

    def snake_case_ (self ) -> str:
        # standard 4-bit load: decoder attention projections must be quantized Linears
        import bitsandbytes as bnb

        from transformers import TaForConditionalGeneration

        # test with `t5-small`
        UpperCamelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__a , device_map="auto" )

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )

        UpperCamelCase = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
        UpperCamelCase = model.generate(**__a )

        # test with `flan-t5-small`
        UpperCamelCase = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=__a , device_map="auto" )
        UpperCamelCase = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
        UpperCamelCase = model.generate(**__a )
class _lowerCamelCase ( _lowercase ):
    """4-bit loading across model classes (base / classification / causal LM / seq2seq).

    NOTE(review): obfuscation damage — every loaded model is bound to the dead
    local ``UpperCamelCase`` while tearDown/tests read ``self.base_model`` /
    ``self.sequence_model`` / ``self.model_abit`` / ``self.seq_to_seq_model``.
    """

    def snake_case_ (self ) -> Optional[Any]:
        super().setUp()
        # model_name
        UpperCamelCase = "bigscience/bloom-560m"
        UpperCamelCase = "t5-small"

        # Different types of model
        UpperCamelCase = AutoModel.from_pretrained(self.model_name , load_in_abit=__a , device_map="auto" )
        # Sequence classification model
        UpperCamelCase = AutoModelForSequenceClassification.from_pretrained(
            self.model_name , load_in_abit=__a , device_map="auto" )
        # CausalLM model
        UpperCamelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__a , device_map="auto" )
        # Seq2seq model
        UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained(
            self.seq_to_seq_name , load_in_abit=__a , device_map="auto" )

    def snake_case_ (self ) -> Tuple:
        # free GPU memory between tests
        del self.base_model
        del self.sequence_model
        del self.model_abit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()

    def snake_case_ (self ) -> Optional[int]:
        # backbone weights are quantized; task heads stay regular nn.Parameter
        from bitsandbytes.nn import Paramsabit

        self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class _lowerCamelCase ( _lowercase ):
    """4-bit loading through the ``pipeline`` API.

    NOTE(review): obfuscation damage — the pipeline and its output are bound to the
    dead local ``UpperCamelCase`` while tearDown/assertions read ``self.pipe`` and
    ``pipeline_output``.
    """

    def snake_case_ (self ) -> int:
        super().setUp()

    def snake_case_ (self ) -> Any:
        # free GPU memory between tests
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def snake_case_ (self ) -> List[Any]:
        # build a 4-bit text-generation pipeline and check it emits a known output
        UpperCamelCase = pipeline(
            "text-generation" , model=self.model_name , model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )

        # Real second forward pass
        UpperCamelCase = self.pipe(self.input_text )
        self.assertIn(pipeline_output[0]["generated_text"] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class _lowerCamelCase ( _lowercase ):
    """Checks 4-bit loading with device_map='balanced' across two GPUs.

    NOTE(review): ``UpperCamelCase`` locals look like obfuscated names for
    ``model_parallel`` / ``encoded_input`` / ``output_parallel`` (the later
    statements read those names) — confirm against the original file.
    """

    def snake_case_ (self ) -> List[Any]:
        super().setUp()

    def snake_case_ (self ) -> Optional[int]:
        UpperCamelCase = AutoModelForCausalLM.from_pretrained(
            self.model_name , load_in_abit=__a , device_map="balanced" )
        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
        # Check that inference pass works on the model
        UpperCamelCase = self.tokenizer(self.input_text , return_tensors="pt" )
        # Second real batch
        UpperCamelCase = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS )
class _lowerCamelCase ( _lowercase ):
    """Trains LoRA-style adapters on top of a frozen 4-bit base model and
    checks gradients flow only into the adapters (not the embeddings)."""

    def snake_case_ (self ) -> Tuple:
        # NOTE(review): presumably ``self.model_name = "facebook/opt-350m"``
        # before the obfuscation — confirm against the original file.
        UpperCamelCase = "facebook/opt-350m"
        super().setUp()

    def snake_case_ (self ) -> List[Any]:
        # Adapter training requires bitsandbytes >= 0.37.0; skip otherwise.
        if version.parse(importlib.metadata.version("bitsandbytes" ) ) < version.parse("0.37.0" ):
            return
        # Step 1: freeze all parameters
        UpperCamelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__a )
        self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
        for param in model.parameters():
            UpperCamelCase = False # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                UpperCamelCase = param.data.to(torch.floataa )
        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(__a ) ):
                UpperCamelCase = LoRALayer(module.q_proj , rank=16 )
                UpperCamelCase = LoRALayer(module.k_proj , rank=16 )
                UpperCamelCase = LoRALayer(module.v_proj , rank=16 )
        # Step 3: dummy batch
        UpperCamelCase = self.tokenizer("Test batch " , return_tensors="pt" ).to(0 )
        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            UpperCamelCase = model.forward(**__a )
            out.logits.norm().backward()
        for module in model.modules():
            if isinstance(__a , __a ):
                # Adapter weights must have received a non-zero gradient.
                self.assertTrue(module.adapter[1].weight.grad is not None )
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
            elif isinstance(__a , nn.Embedding ):
                # Frozen embeddings must have no gradient.
                self.assertTrue(module.weight.grad is None )
class _lowerCamelCase ( _lowercase ):
    # Variant of the base harness pinned to GPT-2 XL.
    # NOTE(review): both attributes are bound to the same obfuscated name
    # ``UpperCAmelCase_`` (likely model_name and EXPECTED_RELATIVE_DIFFERENCE
    # originally) — the second assignment clobbers the first; confirm upstream.
    UpperCAmelCase_ = "gpt2-xl"
    UpperCAmelCase_ = 3.3191854854152187
"""simple docstring"""
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    """Return a representative MLP linear layer from *model*.

    GPT-2 names its first MLP projection ``c_fc``; other architectures in
    these tests (e.g. Bloom) use ``dense_ah_to_h``. The body referenced an
    undefined name ``model`` (the parameter was mangled) and the in-file
    caller uses ``get_some_linear_layer``, so both names are restored.
    """
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
    """Wraps a linear module with a low-rank (LoRA-style) adapter.

    ``forward(x) = wrapped(x) + adapter(x)``; the adapter's second projection
    is zero-initialized so the wrapper starts as an exact no-op.

    Fixes restored here: the original had duplicate ``__a`` parameters
    (a SyntaxError), never assigned ``self.module`` / ``self.adapter``
    (only throwaway locals), and the in-file callers instantiate
    ``LoRALayer``, not the mangled class name.
    """

    def __init__(self, module, rank):
        super().__init__()
        self.module = module
        # Down-project to `rank` dimensions, then back up to the output width.
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        nn.init.zeros_(self.adapter[1].weight)  # start as an identity perturbation
        self.adapter.to(module.weight.device)

    def forward(self, input, *args, **kwargs):
        return self.module(input, *args, **kwargs) + self.adapter(input)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowerCamelCase ( unittest.TestCase ):
    """Base harness for the 4-bit (bitsandbytes) quantization tests.

    NOTE(review): the repeated ``UpperCAmelCase_`` class attributes look like
    obfuscated originals (model_name, EXPECTED_RELATIVE_DIFFERENCE,
    input_text, EXPECTED_OUTPUTS, MAX_NEW_TOKENS) — each reassignment
    clobbers the previous one, and ``EXPECTED_OUTPUTS`` / ``self.model_name``
    are read below but never bound under those names; confirm upstream.
    """

    # We keep the constants inside the init function and model loading inside setUp function
    # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
    # Therefore here we use only bloom-1b3 to test our module
    UpperCAmelCase_ = "bigscience/bloom-1b7"
    # Constant values
    UpperCAmelCase_ = 2.109659552692574
    UpperCAmelCase_ = "Hello my name is"
    UpperCAmelCase_ = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
    UpperCAmelCase_ = 10

    def snake_case_ (self ) -> str:
        # Models and tokenizer
        UpperCamelCase = AutoTokenizer.from_pretrained(self.model_name )
class _lowerCamelCase ( _lowercase ):
    """4-bit loading tests: config round-trip, memory footprint, generation,
    and device/dtype-conversion guards.

    NOTE(review): ``UpperCamelCase = ...`` locals throughout look like
    obfuscated ``self.model_fpaa`` / ``self.model_abit`` (and local result)
    assignments — later statements read those names; confirm upstream.
    All methods share the obfuscated name ``snake_case_`` (last def wins).
    """

    def snake_case_ (self ) -> Any:
        super().setUp()
        # Models and tokenizer
        UpperCamelCase = AutoModelForCausalLM.from_pretrained(
            self.model_name , torch_dtype=torch.floataa , device_map="auto" )
        UpperCamelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__a , device_map="auto" )

    def snake_case_ (self ) -> str:
        # Teardown: drop both models and reclaim GPU memory.
        del self.model_fpaa
        del self.model_abit
        gc.collect()
        torch.cuda.empty_cache()

    def snake_case_ (self ) -> Optional[Any]:
        # Quantized model must expose a serializable quantization_config.
        UpperCamelCase = self.model_abit.config
        self.assertTrue(hasattr(__a , "quantization_config" ) )
        UpperCamelCase = config.to_dict()
        UpperCamelCase = config.to_diff_dict()
        UpperCamelCase = config.to_json_string()

    def snake_case_ (self ) -> Optional[int]:
        # The 4-bit footprint should shrink by the expected ratio and linear
        # weights should be bitsandbytes Params4bit.
        from bitsandbytes.nn import Paramsabit

        UpperCamelCase = self.model_fpaa.get_memory_footprint()
        UpperCamelCase = self.model_abit.get_memory_footprint()
        self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
        UpperCamelCase = get_some_linear_layer(self.model_abit )
        self.assertTrue(linear.weight.__class__ == Paramsabit )

    def snake_case_ (self ) -> Dict:
        # Quantized Linear weights are packed as uint8, except the lm_head
        # and modules kept in fp32.
        from transformers import TaPreTrainedModel

        self.model_fpaa.get_memory_footprint()
        self.model_abit.get_memory_footprint()
        for name, module in self.model_abit.named_modules():
            if isinstance(__a , torch.nn.Linear ):
                if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uinta )

    def snake_case_ (self ) -> Tuple:
        # Generation with the quantized model must produce an expected string.
        UpperCamelCase = self.tokenizer(self.input_text , return_tensors="pt" )
        UpperCamelCase = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS )

    def snake_case_ (self ) -> Optional[int]:
        # Loading via an explicit BitsAndBytesConfig must behave like the
        # load_in_4bit shortcut.
        UpperCamelCase = BitsAndBytesConfig()
        UpperCamelCase = True
        UpperCamelCase = AutoModelForCausalLM.from_pretrained(
            self.model_name , quantization_config=__a , device_map="auto" )
        UpperCamelCase = self.tokenizer(self.input_text , return_tensors="pt" )
        UpperCamelCase = model_abit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS )

    def snake_case_ (self ) -> Dict:
        # Serializing a 4-bit model is unsupported and must raise.
        with self.assertRaises(__a ), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_abit.save_pretrained(__a )

    def snake_case_ (self ) -> int:
        # Passing both a quantization_config and 4-bit kwargs must raise.
        UpperCamelCase = BitsAndBytesConfig()
        with self.assertRaises(__a ):
            UpperCamelCase = AutoModelForCausalLM.from_pretrained(
                self.model_name , quantization_config=__a , load_in_abit=__a , device_map="auto" , bnb_abit_quant_type="nf4" , )

    def snake_case_ (self ) -> Any:
        # Moving/casting a quantized model is forbidden; the fp16 model must
        # remain freely convertible.
        with self.assertRaises(__a ):
            # Tries with `str`
            self.model_abit.to("cpu" )
        with self.assertRaises(__a ):
            # Tries with a `dtype``
            self.model_abit.to(torch.floataa )
        with self.assertRaises(__a ):
            # Tries with a `device`
            self.model_abit.to(torch.device("cuda:0" ) )
        with self.assertRaises(__a ):
            # Tries with a `device`
            self.model_abit.float()
        with self.assertRaises(__a ):
            # Tries with a `device`
            self.model_abit.half()
        # Test if we did not break anything
        UpperCamelCase = self.tokenizer(self.input_text , return_tensors="pt" )
        UpperCamelCase = self.model_fpaa.to(torch.floataa )
        UpperCamelCase = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
        # Check this does not throw an error
        UpperCamelCase = self.model_fpaa.to("cpu" )
        # Check this does not throw an error
        UpperCamelCase = self.model_fpaa.half()
        # Check this does not throw an error
        UpperCamelCase = self.model_fpaa.float()

    def snake_case_ (self ) -> str:
        # T5 keeps its fp32 modules (wo) un-quantized when loaded in 4-bit.
        UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained("t5-small" , load_in_abit=__a , device_map="auto" )
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowerCamelCase ( unittest.TestCase ):
    """T5-specific 4-bit tests, with and without _keep_in_fp32_modules."""

    @classmethod
    def snake_case_ (cls ) -> Tuple:
        # Class-level fixture: checkpoints, tokenizer and a prompt.
        # NOTE(review): the ``UpperCamelCase`` locals look like obfuscated
        # ``cls.model_name`` / ``cls.dense_act_model_name`` / ``cls.tokenizer``
        # / ``cls.input_text`` assignments — confirm against the original file.
        UpperCamelCase = "t5-small"
        UpperCamelCase = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense
        UpperCamelCase = AutoTokenizer.from_pretrained(cls.model_name )
        UpperCamelCase = "Translate in German: Hello, my dog is cute"

    def snake_case_ (self ) -> Any:
        # Teardown: reclaim GPU memory between tests.
        gc.collect()
        torch.cuda.empty_cache()

    def snake_case_ (self ) -> Optional[Any]:
        # Temporarily disable T5's fp32-kept modules and check that 4-bit
        # inference still runs for both T5 variants.
        from transformers import TaForConditionalGeneration

        UpperCamelCase = TaForConditionalGeneration._keep_in_fpaa_modules
        UpperCamelCase = None
        # test with `t5-small`
        UpperCamelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__a , device_map="auto" )
        UpperCamelCase = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
        UpperCamelCase = model.generate(**__a )
        # test with `flan-t5-small`
        UpperCamelCase = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=__a , device_map="auto" )
        UpperCamelCase = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
        UpperCamelCase = model.generate(**__a )
        UpperCamelCase = modules  # restore the saved _keep_in_fp32_modules list

    def snake_case_ (self ) -> str:
        # With fp32-kept modules active, decoder attention projections must
        # still be bitsandbytes Linear4bit layers.
        import bitsandbytes as bnb
        from transformers import TaForConditionalGeneration

        # test with `t5-small`
        UpperCamelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__a , device_map="auto" )
        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
        UpperCamelCase = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
        UpperCamelCase = model.generate(**__a )
        # test with `flan-t5-small`
        UpperCamelCase = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=__a , device_map="auto" )
        UpperCamelCase = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
        UpperCamelCase = model.generate(**__a )
class _lowerCamelCase ( _lowercase ):
    """Smoke-tests 4-bit loading across head types (base, sequence
    classification, causal LM, seq2seq).

    NOTE(review): the ``UpperCamelCase = ...`` locals below look like
    obfuscated ``self.<attr> = ...`` assignments — as written the loaded
    models are bound to throwaway locals, yet the teardown deletes
    ``self.base_model`` etc.; confirm against the original file. All
    methods also share the obfuscated name ``snake_case_`` (last def wins).
    """

    def snake_case_ (self ) -> Optional[Any]:
        # Fixture: load the same checkpoint under several Auto* heads in 4-bit.
        super().setUp()
        # model_name
        UpperCamelCase = "bigscience/bloom-560m"
        UpperCamelCase = "t5-small"
        # Different types of model
        UpperCamelCase = AutoModel.from_pretrained(self.model_name , load_in_abit=__a , device_map="auto" )
        # Sequence classification model
        UpperCamelCase = AutoModelForSequenceClassification.from_pretrained(
            self.model_name , load_in_abit=__a , device_map="auto" )
        # CausalLM model
        UpperCamelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__a , device_map="auto" )
        # Seq2seq model
        UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained(
            self.seq_to_seq_name , load_in_abit=__a , device_map="auto" )

    def snake_case_ (self ) -> Tuple:
        # Teardown: free model references and reclaim GPU memory.
        del self.base_model
        del self.sequence_model
        del self.model_abit
        del self.seq_to_seq_model
        gc.collect()
        torch.cuda.empty_cache()

    def snake_case_ (self ) -> Optional[int]:
        # Only the transformer body should be quantized (Params4bit);
        # task heads must stay plain nn.Parameter.
        from bitsandbytes.nn import Paramsabit

        self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
        # Other heads should be nn.Parameter
        self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class _lowerCamelCase ( _lowercase ):
    """Runs a text-generation pipeline with a 4-bit quantized model end to end.

    NOTE(review): the ``UpperCamelCase`` locals look like obfuscated
    ``self.pipe`` / ``pipeline_output`` assignments (the teardown deletes
    ``self.pipe`` and the assert reads ``pipeline_output``) — confirm
    against the original file.
    """

    def snake_case_ (self ) -> int:
        super().setUp()

    def snake_case_ (self ) -> Any:
        # Teardown: drop the pipeline and reclaim GPU memory.
        del self.pipe
        gc.collect()
        torch.cuda.empty_cache()

    def snake_case_ (self ) -> List[Any]:
        # Build the pipeline with 4-bit weights and check the generated text.
        UpperCamelCase = pipeline(
            "text-generation" , model=self.model_name , model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
        # Real second forward pass
        UpperCamelCase = self.pipe(self.input_text )
        self.assertIn(pipeline_output[0]["generated_text"] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class _lowerCamelCase ( _lowercase ):
    """Checks 4-bit loading with device_map='balanced' across two GPUs.

    NOTE(review): ``UpperCamelCase`` locals look like obfuscated names for
    ``model_parallel`` / ``encoded_input`` / ``output_parallel`` (the later
    statements read those names) — confirm against the original file.
    """

    def snake_case_ (self ) -> List[Any]:
        super().setUp()

    def snake_case_ (self ) -> Optional[int]:
        UpperCamelCase = AutoModelForCausalLM.from_pretrained(
            self.model_name , load_in_abit=__a , device_map="balanced" )
        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
        # Check that inference pass works on the model
        UpperCamelCase = self.tokenizer(self.input_text , return_tensors="pt" )
        # Second real batch
        UpperCamelCase = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS )
class _lowerCamelCase ( _lowercase ):
    """Trains LoRA-style adapters on top of a frozen 4-bit base model and
    checks gradients flow only into the adapters (not the embeddings)."""

    def snake_case_ (self ) -> Tuple:
        # NOTE(review): presumably ``self.model_name = "facebook/opt-350m"``
        # before the obfuscation — confirm against the original file.
        UpperCamelCase = "facebook/opt-350m"
        super().setUp()

    def snake_case_ (self ) -> List[Any]:
        # Adapter training requires bitsandbytes >= 0.37.0; skip otherwise.
        if version.parse(importlib.metadata.version("bitsandbytes" ) ) < version.parse("0.37.0" ):
            return
        # Step 1: freeze all parameters
        UpperCamelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__a )
        self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
        for param in model.parameters():
            UpperCamelCase = False # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                UpperCamelCase = param.data.to(torch.floataa )
        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(__a ) ):
                UpperCamelCase = LoRALayer(module.q_proj , rank=16 )
                UpperCamelCase = LoRALayer(module.k_proj , rank=16 )
                UpperCamelCase = LoRALayer(module.v_proj , rank=16 )
        # Step 3: dummy batch
        UpperCamelCase = self.tokenizer("Test batch " , return_tensors="pt" ).to(0 )
        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            UpperCamelCase = model.forward(**__a )
            out.logits.norm().backward()
        for module in model.modules():
            if isinstance(__a , __a ):
                # Adapter weights must have received a non-zero gradient.
                self.assertTrue(module.adapter[1].weight.grad is not None )
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
            elif isinstance(__a , nn.Embedding ):
                # Frozen embeddings must have no gradient.
                self.assertTrue(module.weight.grad is None )
class _lowerCamelCase ( _lowercase ):
    # Variant of the base harness pinned to GPT-2 XL.
    # NOTE(review): both attributes are bound to the same obfuscated name
    # ``UpperCAmelCase_`` (likely model_name and EXPECTED_RELATIVE_DIFFERENCE
    # originally) — the second assignment clobbers the first; confirm upstream.
    UpperCAmelCase_ = "gpt2-xl"
    UpperCAmelCase_ = 3.3191854854152187
| 544 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : List[str] = logging.get_logger(__name__)
A_ : Dict = {
"google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
"google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class __snake_case ( PretrainedConfig ):
    """Configuration class storing the MobileNetV1 architecture hyper-parameters.

    Fixes restored here: the original signature repeated one mangled
    parameter name nine times (a SyntaxError), inherited from an undefined
    name, and dropped every hyper-parameter into a throwaway local instead
    of storing it on ``self``.
    """

    # `model_type` is the key PretrainedConfig/AutoConfig dispatch on.
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=2_2_4,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        """Store the architecture hyper-parameters; raises ValueError for a
        non-positive depth multiplier."""
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("""depth_multiplier must be greater than zero.""")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class __snake_case ( OnnxConfig ):
    """ONNX export configuration for MobileNetV1.

    Fixes restored here: the original inherited from an undefined name and
    defined three properties under one mangled name (the last silently
    shadowing the others); the standard OnnxConfig property names are
    restored.
    """

    # Minimum torch version supporting this export.
    torch_onnx_minimum_version = version.parse("""1.11""")

    @property
    def inputs(self):
        # Single image input; only the batch axis is dynamic.
        return OrderedDict([("""pixel_values""", {0: """batch"""})])

    @property
    def outputs(self):
        if self.task == "image-classification":
            return OrderedDict([("""logits""", {0: """batch"""})])
        else:
            return OrderedDict([("""last_hidden_state""", {0: """batch"""}), ("""pooler_output""", {0: """batch"""})])

    @property
    def atol_for_validation(self):
        # Absolute tolerance when validating ONNX vs. PyTorch outputs.
        return 1e-4
| 38 |
from __future__ import annotations
class BoyerMooreSearch:
    """Boyer–Moore substring search using the bad-character heuristic.

    Fixes restored here: the class is instantiated as ``BoyerMooreSearch``
    by the driver code below, its attributes were dropped into throwaway
    locals, and all three methods shared one mangled name while calling
    each other by the names restored here.
    """

    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the rightmost index of *char* in the pattern, or -1."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the text index of the rightmost mismatch when the pattern
        is aligned at *current_pos*, or -1 if the window matches fully."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
# Demo: search for `pattern` in `text` and report every match position.
# (The original bound every result to one mangled name `lowercase_` while
# the statements below read `text`/`pattern`/`bms`/`positions`.)
text = '''ABAABA'''
pattern = '''AB'''
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print('''No match found''')
else:
    print('''Pattern found in following positions: ''')
    print(positions)
| 354 | 0 |
def hamming(n_element: int) -> list:
    """Return the first *n_element* Hamming numbers (numbers of the form
    2^i * 3^j * 5^k), in increasing order, starting from 1.

    Raises ValueError if *n_element* is smaller than 1. (The original body
    referenced an undefined mangled name for its argument and collapsed all
    locals onto one name; the driver code below calls ``hamming``.)
    """
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("a should be a positive number")
        raise my_error

    hamming_list = [1]
    # i, j, k index the smallest elements whose doubles/triples/quintuples
    # have not yet been emitted.
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list
if __name__ == "__main__":
UpperCAmelCase_ : Optional[Any] = input("Enter the last number (nth term) of the Hamming Number Series: ")
print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
UpperCAmelCase_ : Union[str, Any] = hamming(int(n))
print("-----------------------------------------------------")
print(F'''The list with nth numbers is: {hamming_numbers}''')
print("-----------------------------------------------------")
| 701 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ : int = {
"configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
"tokenization_electra": ["ElectraTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[Any] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[Any] = [
"ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"ElectraForCausalLM",
"ElectraForMaskedLM",
"ElectraForMultipleChoice",
"ElectraForPreTraining",
"ElectraForQuestionAnswering",
"ElectraForSequenceClassification",
"ElectraForTokenClassification",
"ElectraModel",
"ElectraPreTrainedModel",
"load_tf_weights_in_electra",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : int = [
"TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFElectraForMaskedLM",
"TFElectraForMultipleChoice",
"TFElectraForPreTraining",
"TFElectraForQuestionAnswering",
"TFElectraForSequenceClassification",
"TFElectraForTokenClassification",
"TFElectraModel",
"TFElectraPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[str] = [
"FlaxElectraForCausalLM",
"FlaxElectraForMaskedLM",
"FlaxElectraForMultipleChoice",
"FlaxElectraForPreTraining",
"FlaxElectraForQuestionAnswering",
"FlaxElectraForSequenceClassification",
"FlaxElectraForTokenClassification",
"FlaxElectraModel",
"FlaxElectraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 232 | 0 |
'''simple docstring'''
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
lowercase__ = argparse.ArgumentParser()
parser.add_argument("--user", type=str, default="ubuntu")
parser.add_argument("--host", type=str, default="localhost")
parser.add_argument("--key_path", type=str, default=None)
parser.add_argument("--instance", type=str, default="V100:1")
parser.add_argument("--provider", type=str, default="cheapest")
parser.add_argument("--use_spot", type=bool, default=False)
parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
lowercase__ , lowercase__ = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError("Cannot specify both BYO and on-demand cluster args")
lowercase__ = rh.cluster(
name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
)
else:
lowercase__ = rh.cluster(
name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
lowercase__ = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(["pip:./"]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f"pip install -r transformers/examples/{example_dir}/requirements.txt"])
cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f"python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 638 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
lowercase__ = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
lowercase__ = {"facebook/blenderbot-3B": 1_2_8}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Return a mapping from every byte value (0-255) to a printable
    unicode character.

    Printable Latin-1 bytes map to themselves; the remaining bytes are
    remapped to code points >= 256 so byte-level BPE never emits
    whitespace/control characters. (The original collapsed all locals onto
    one mangled name, leaving the body referencing undefined variables;
    the class below calls ``bytes_to_unicode``.)
    """
    bs = (
        list(range(ord('''!''') , ord('''~''') + 1)) + list(range(ord('''¡''') , ord('''¬''') + 1)) + list(range(ord('''®''') , ord('''ÿ''') + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(c) for c in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in *word*.

    *word* is a tuple of symbols (variable-length strings). (The original
    collapsed its locals onto one mangled name; the tokenizer below calls
    ``get_pairs``.)
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class snake_case__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowerCamelCase = VOCAB_FILES_NAMES
lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase = ["""input_ids""", """attention_mask"""]
def __init__( self : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str="replace" , UpperCamelCase__ : int="<s>" , UpperCamelCase__ : Optional[int]="</s>" , UpperCamelCase__ : int="</s>" , UpperCamelCase__ : Optional[int]="<s>" , UpperCamelCase__ : Any="<unk>" , UpperCamelCase__ : List[Any]="<pad>" , UpperCamelCase__ : Optional[int]="<mask>" , UpperCamelCase__ : Optional[Any]=False , **UpperCamelCase__ : Dict , ) -> str:
"""simple docstring"""
snake_case : str = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else bos_token
snake_case : Any = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else eos_token
snake_case : List[str] = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else sep_token
snake_case : Optional[Any] = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else cls_token
snake_case : int = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else unk_token
snake_case : str = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
snake_case : str = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
super().__init__(
errors=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , **UpperCamelCase__ , )
with open(UpperCamelCase__ , encoding='''utf-8''' ) as vocab_handle:
snake_case : Union[str, Any] = json.load(UpperCamelCase__ )
snake_case : Dict = {v: k for k, v in self.encoder.items()}
snake_case : Optional[int] = errors # how to handle errors in decoding
snake_case : List[Any] = bytes_to_unicode()
snake_case : Dict = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCamelCase__ , encoding='''utf-8''' ) as merges_handle:
snake_case : Optional[Any] = merges_handle.read().split('''\n''' )[1:-1]
snake_case : Optional[int] = [tuple(merge.split() ) for merge in bpe_merges]
snake_case : Optional[int] = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
snake_case : int = {}
snake_case : Any = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
snake_case : Any = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def lowerCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
return len(self.encoder )
def lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : Tuple ) -> Dict:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
snake_case : str = tuple(UpperCamelCase__ )
snake_case : Union[str, Any] = get_pairs(UpperCamelCase__ )
if not pairs:
return token
while True:
snake_case : Tuple = min(UpperCamelCase__ , key=lambda UpperCamelCase__ : self.bpe_ranks.get(UpperCamelCase__ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
snake_case ,snake_case : List[str] = bigram
snake_case : int = []
snake_case : int = 0
while i < len(UpperCamelCase__ ):
try:
snake_case : Optional[int] = word.index(UpperCamelCase__ , UpperCamelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
snake_case : Optional[int] = j
if word[i] == first and i < len(UpperCamelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
snake_case : str = tuple(UpperCamelCase__ )
snake_case : Tuple = new_word
if len(UpperCamelCase__ ) == 1:
break
else:
snake_case : List[str] = get_pairs(UpperCamelCase__ )
snake_case : str = ''' '''.join(UpperCamelCase__ )
snake_case : int = word
return word
def lowerCAmelCase ( self : Optional[int] , UpperCamelCase__ : List[Any] ) -> List[str]:
"""simple docstring"""
snake_case : str = []
for token in re.findall(self.pat , UpperCamelCase__ ):
snake_case : Optional[int] = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCamelCase__ ).split(''' ''' ) )
return bpe_tokens
def lowerCAmelCase ( self : int , UpperCamelCase__ : Any ) -> Any:
"""simple docstring"""
return self.encoder.get(UpperCamelCase__ , self.encoder.get(self.unk_token ) )
def lowerCAmelCase ( self : Any , UpperCamelCase__ : List[Any] ) -> int:
"""simple docstring"""
return self.decoder.get(UpperCamelCase__ )
def lowerCAmelCase ( self : Tuple , UpperCamelCase__ : int ) -> List[str]:
"""simple docstring"""
snake_case : Optional[int] = ''''''.join(UpperCamelCase__ )
snake_case : int = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def lowerCAmelCase ( self : int , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(UpperCamelCase__ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
snake_case : int = os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
snake_case : List[Any] = os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase__ , ensure_ascii=UpperCamelCase__ ) + '''\n''' )
snake_case : str = 0
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase__ : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
''' Please check that the tokenizer is not corrupted!''' )
snake_case : List[str] = token_index
writer.write(''' '''.join(UpperCamelCase__ ) + '''\n''' )
index += 1
return vocab_file, merge_file
def lowerCAmelCase ( self : List[Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase__ )) + [1]
return [1] + ([0] * len(UpperCamelCase__ )) + [1, 1] + ([0] * len(UpperCamelCase__ )) + [1]
def lowerCAmelCase ( self : Optional[Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
snake_case : List[Any] = [self.sep_token_id]
snake_case : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase ( self : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Any=False , **UpperCamelCase__ : List[Any] ) -> int:
"""simple docstring"""
snake_case : Union[str, Any] = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(UpperCamelCase__ ) > 0 and not text[0].isspace()):
snake_case : Dict = ''' ''' + text
return (text, kwargs)
def lowerCAmelCase ( self : Optional[Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ) -> str:
"""simple docstring"""
return token_ids_a + [self.eos_token_id]
def lowerCAmelCase ( self : str , UpperCamelCase__ : "Conversation" ) -> List[int]:
"""simple docstring"""
snake_case : Any = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(''' ''' + text )
else:
# Generated responses should contain them already.
inputs.append(UpperCamelCase__ )
snake_case : List[str] = ''' '''.join(UpperCamelCase__ )
snake_case : Tuple = self.encode(UpperCamelCase__ )
if len(UpperCamelCase__ ) > self.model_max_length:
snake_case : List[Any] = input_ids[-self.model_max_length :]
logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
return input_ids
| 638 | 1 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
# Module-level constants for the M2M100 tokenizer. The original rebound a single
# name (`__snake_case`) for every constant, leaving VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES and
# FAIRSEQ_LANGUAGE_CODES -- all referenced by the tokenizer class below -- undefined.
logger = logging.get_logger(__name__)

# Sentencepiece's meta-symbol marking a word boundary.
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
    },
    "spm_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_config_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/m2m100_418M": 1_024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
    "m2m100": ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'],
    "wmt21": ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de'],
}
# fmt: on
class UpperCamelCase__ ( UpperCAmelCase__):
    """Sentencepiece-backed M2M100 tokenizer: a vocab.json id table plus an SPM model,
    with one ``__<lang>__`` token registered per fairseq language code. Encoded source
    sequences are wrapped as ``[src_lang_token] tokens [eos]``.

    NOTE(review): method parameter lists in this block were mangled by an automated
    rename (several parameters per method all named ``A``) and are not valid Python
    as written; distinct parameter names must be restored before use.
    """
    __a : Optional[Any] = VOCAB_FILES_NAMES
    __a : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __a : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
    __a : Union[str, Any] = ["""input_ids""", """attention_mask"""]
    # Prefix/suffix special tokens, rewritten by set_src/tgt_lang_special_tokens.
    __a : List[int] = []
    __a : List[int] = []

    def __init__( self , A , A , A=None , A=None , A="<s>" , A="</s>" , A="</s>" , A="<pad>" , A="<unk>" , A="m2m100" , A = None , A=8 , **A , ) ->None:
        # Load vocab.json + the sentencepiece model and register language tokens.
        UpperCAmelCase__ :Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
        UpperCAmelCase__ :List[str] = language_codes
        UpperCAmelCase__ :Optional[Any] = FAIRSEQ_LANGUAGE_CODES[language_codes]
        UpperCAmelCase__ :Dict = {lang_code: F"""__{lang_code}__""" for lang_code in fairseq_language_code}
        UpperCAmelCase__ :str = kwargs.get('additional_special_tokens' , [] )
        # Make every language token an additional special token (skipping duplicates).
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(A )
            for lang_code in fairseq_language_code
            if self.get_lang_token(A ) not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=A , tgt_lang=A , bos_token=A , eos_token=A , sep_token=A , unk_token=A , pad_token=A , language_codes=A , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=A , **A , )
        UpperCAmelCase__ :Optional[int] = vocab_file
        UpperCAmelCase__ :Any = load_json(A )
        UpperCAmelCase__ :Any = {v: k for k, v in self.encoder.items()}
        UpperCAmelCase__ :List[Any] = spm_file
        UpperCAmelCase__ :List[str] = load_spm(A , self.sp_model_kwargs )
        UpperCAmelCase__ :Optional[Any] = len(self.encoder )
        # Language tokens get ids immediately after the sentencepiece vocabulary.
        UpperCAmelCase__ :Union[str, Any] = {
            self.get_lang_token(A ): self.encoder_size + i for i, lang_code in enumerate(A )
        }
        UpperCAmelCase__ :Union[str, Any] = {lang_code: self.encoder_size + i for i, lang_code in enumerate(A )}
        UpperCAmelCase__ :Tuple = {v: k for k, v in self.lang_token_to_id.items()}
        UpperCAmelCase__ :List[Any] = src_lang if src_lang is not None else 'en'
        UpperCAmelCase__ :List[Any] = tgt_lang
        UpperCAmelCase__ :List[str] = self.get_lang_id(self._src_lang )
        self.set_src_lang_special_tokens(self._src_lang )
        UpperCAmelCase__ :List[str] = num_madeup_words

    @property
    def A__ ( self ) ->int:
        # vocab_size: sentencepiece entries plus the language tokens.
        return len(self.encoder ) + len(self.lang_token_to_id )

    @property
    def A__ ( self ) ->str:
        return self._src_lang

    @src_lang.setter
    def A__ ( self , A ) ->None:
        # Changing the source language rewrites the prefix/suffix special tokens.
        UpperCAmelCase__ :int = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )

    def A__ ( self , A ) ->List[str]:
        # Tokenize with the sentencepiece model.
        return self.sp_model.encode(A , out_type=A )

    def A__ ( self , A ) ->Any:
        # token -> id; language tokens take precedence, unknowns map to unk.
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(A , self.encoder[self.unk_token] )

    def A__ ( self , A ) ->str:
        # id -> token; language-token ids take precedence, unknowns map to unk.
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(A , self.unk_token )

    def A__ ( self , A ) ->str:
        """Decode a token list back to text, keeping special tokens verbatim."""
        UpperCAmelCase__ :Dict = []
        UpperCAmelCase__ :Union[str, Any] = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(A ) + token
                UpperCAmelCase__ :int = []
            else:
                current_sub_tokens.append(A )
        out_string += self.sp_model.decode(A )
        return out_string.strip()

    def A__ ( self , A , A = None , A = False ) ->List[int]:
        # Special-tokens mask: 1 for the language-token prefix and eos suffix, 0 elsewhere.
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
        UpperCAmelCase__ :List[Any] = [1] * len(self.prefix_tokens )
        UpperCAmelCase__ :List[Any] = [1] * len(self.suffix_tokens )
        if token_ids_a is None:
            return prefix_ones + ([0] * len(A )) + suffix_ones
        return prefix_ones + ([0] * len(A )) + ([0] * len(A )) + suffix_ones

    def A__ ( self , A , A = None ) ->List[int]:
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens

    def A__ ( self ) ->Dict:
        # Full vocabulary (including added tokens) as token -> id.
        UpperCAmelCase__ :str = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self ) ->Dict:
        # The SentencePieceProcessor is not picklable; drop it and reload on unpickle.
        UpperCAmelCase__ :List[Any] = self.__dict__.copy()
        UpperCAmelCase__ :Optional[Any] = None
        return state

    def __setstate__( self , A ) ->None:
        UpperCAmelCase__ :Optional[int] = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            UpperCAmelCase__ :Dict = {}
        UpperCAmelCase__ :int = load_spm(self.spm_file , self.sp_model_kwargs )

    def A__ ( self , A , A = None ) ->Tuple[str]:
        """Save vocab.json and the serialized sentencepiece model into a directory."""
        UpperCAmelCase__ :Any = Path(A )
        if not save_dir.is_dir():
            raise OSError(F"""{save_directory} should be a directory""" )
        UpperCAmelCase__ :Any = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
        )
        UpperCAmelCase__ :Any = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
        )
        save_json(self.encoder , A )
        # Copy the spm file if it exists elsewhere, otherwise serialize from memory.
        if os.path.abspath(self.spm_file ) != os.path.abspath(A ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file , A )
        elif not os.path.isfile(self.spm_file ):
            with open(A , 'wb' ) as fi:
                UpperCAmelCase__ :str = self.sp_model.serialized_model_proto()
                fi.write(A )
        return (str(A ), str(A ))

    def A__ ( self , A , A = "en" , A = None , A = "ro" , **A , ) ->BatchEncoding:
        # Batch helper: record src/tgt languages, then delegate to the base class.
        UpperCAmelCase__ :Any = src_lang
        UpperCAmelCase__ :Any = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang )
        return super().prepare_seqaseq_batch(A , A , **A )

    def A__ ( self , A , A , A , **A ) ->int:
        """Used by generation: encode inputs and attach the target-language id."""
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
        UpperCAmelCase__ :Optional[Any] = src_lang
        UpperCAmelCase__ :str = self(A , add_special_tokens=A , **A )
        UpperCAmelCase__ :Optional[int] = self.get_lang_id(A )
        UpperCAmelCase__ :List[Any] = tgt_lang_id
        return inputs

    def A__ ( self ) ->List[str]:
        # Switch special tokens to source-language mode (encoder inputs).
        self.set_src_lang_special_tokens(self.src_lang )

    def A__ ( self ) ->Tuple:
        # Switch special tokens to target-language mode (decoder labels).
        self.set_tgt_lang_special_tokens(self.tgt_lang )

    def A__ ( self , A ) ->None:
        """Reset special tokens to the source language: prefix=[src_lang_code], suffix=[eos]."""
        UpperCAmelCase__ :int = self.get_lang_token(A )
        UpperCAmelCase__ :Optional[Any] = self.lang_token_to_id[lang_token]
        UpperCAmelCase__ :str = [self.cur_lang_id]
        UpperCAmelCase__ :List[str] = [self.eos_token_id]

    def A__ ( self , A ) ->None:
        """Reset special tokens to the target language: prefix=[tgt_lang_code], suffix=[eos]."""
        UpperCAmelCase__ :Dict = self.get_lang_token(A )
        UpperCAmelCase__ :Optional[int] = self.lang_token_to_id[lang_token]
        UpperCAmelCase__ :Union[str, Any] = [self.cur_lang_id]
        UpperCAmelCase__ :List[str] = [self.eos_token_id]

    def A__ ( self , A ) ->str:
        # lang code (e.g. "en") -> language token (e.g. "__en__").
        return self.lang_code_to_token[lang]

    def A__ ( self , A ) ->int:
        # lang code -> language-token id.
        UpperCAmelCase__ :Dict = self.get_lang_token(A )
        return self.lang_token_to_id[lang_token]
def load_spm ( path , sp_model_kwargs ):
    """Load a sentencepiece model from `path`, forwarding constructor kwargs.

    The original declared two parameters with the same name (SyntaxError) and was
    named ``A`` while the tokenizer class calls ``load_spm``.
    """
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm
def load_json ( path ):
    """Read and parse a JSON file at `path`.

    The original passed the path string to ``json.load`` (which expects a file
    object) and was named ``A`` while the tokenizer class calls ``load_json``.
    """
    with open(path, 'r' ) as f:
        return json.load(f)
def save_json ( data , path ):
    """Serialise `data` as indented JSON to `path`.

    The original declared duplicate parameter names (SyntaxError) and dumped the
    data into itself instead of the file handle.
    """
    with open(path, 'w' ) as f:
        json.dump(data, f, indent=2 )
| 433 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class UpperCamelCase__ ( UpperCAmelCase__):
    """Processor bundling an OwlViT image processor with a CLIP tokenizer. ``__call__``
    tokenizes text queries (padding every batch sample to the same number of queries)
    and/or preprocesses images and query images into a single BatchEncoding.

    NOTE(review): method parameter lists in this block were mangled (several
    parameters per method all named ``A``) and are not valid Python as written;
    distinct parameter names must be restored before use.
    """
    __a : Optional[int] = ["""image_processor""", """tokenizer"""]
    __a : int = """OwlViTImageProcessor"""
    __a : Optional[int] = ("""CLIPTokenizer""", """CLIPTokenizerFast""")

    def __init__( self , A=None , A=None , **A ) ->str:
        # Accept the deprecated `feature_extractor` kwarg as an alias of `image_processor`.
        UpperCAmelCase__ :str = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , A , )
            UpperCAmelCase__ :List[Any] = kwargs.pop('feature_extractor' )
        UpperCAmelCase__ :Dict = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(A , A )

    def __call__( self , A=None , A=None , A=None , A="max_length" , A="np" , **A ) ->Tuple:
        """Tokenize `text` and/or preprocess `images`/`query_images` into one encoding."""
        if text is None and query_images is None and images is None:
            raise ValueError(
                'You have to specify at least one text or query image or image. All three cannot be none.' )
        if text is not None:
            if isinstance(A , A ) or (isinstance(A , A ) and not isinstance(text[0] , A )):
                UpperCAmelCase__ :Optional[Any] = [self.tokenizer(A , padding=A , return_tensors=A , **A )]
            elif isinstance(A , A ) and isinstance(text[0] , A ):
                UpperCAmelCase__ :Dict = []
                # Maximum number of queries across batch
                UpperCAmelCase__ :Dict = max([len(A ) for t in text] )
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(A ) != max_num_queries:
                        UpperCAmelCase__ :List[str] = t + [' '] * (max_num_queries - len(A ))
                    UpperCAmelCase__ :List[str] = self.tokenizer(A , padding=A , return_tensors=A , **A )
                    encodings.append(A )
            else:
                raise TypeError('Input text should be a string, a list of strings or a nested list of strings' )
            # Stack the per-sample encodings into one batch for the requested framework.
            if return_tensors == "np":
                UpperCAmelCase__ :Any = np.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
                UpperCAmelCase__ :int = np.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp
                UpperCAmelCase__ :int = jnp.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
                UpperCAmelCase__ :Optional[Any] = jnp.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
            elif return_tensors == "pt" and is_torch_available():
                import torch
                UpperCAmelCase__ :List[str] = torch.cat([encoding['input_ids'] for encoding in encodings] , dim=0 )
                UpperCAmelCase__ :int = torch.cat([encoding['attention_mask'] for encoding in encodings] , dim=0 )
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf
                UpperCAmelCase__ :List[str] = tf.stack([encoding['input_ids'] for encoding in encodings] , axis=0 )
                UpperCAmelCase__ :List[str] = tf.stack([encoding['attention_mask'] for encoding in encodings] , axis=0 )
            else:
                raise ValueError('Target return tensor type could not be returned' )
            UpperCAmelCase__ :Optional[int] = BatchEncoding()
            UpperCAmelCase__ :Any = input_ids
            UpperCAmelCase__ :str = attention_mask
        if query_images is not None:
            UpperCAmelCase__ :Optional[int] = BatchEncoding()
            UpperCAmelCase__ :Tuple = self.image_processor(
                A , return_tensors=A , **A ).pixel_values
            UpperCAmelCase__ :str = query_pixel_values
        if images is not None:
            UpperCAmelCase__ :Optional[int] = self.image_processor(A , return_tensors=A , **A )
        # Merge image pixel values into the text/query encoding when both are present.
        if text is not None and images is not None:
            UpperCAmelCase__ :Optional[int] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            UpperCAmelCase__ :int = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**A ) , tensor_type=A )

    def A__ ( self , *A , **A ) ->Tuple:
        # Thin delegation to the image processor's post-processing.
        return self.image_processor.post_process(*A , **A )

    def A__ ( self , *A , **A ) ->Tuple:
        return self.image_processor.post_process_object_detection(*A , **A )

    def A__ ( self , *A , **A ) ->Any:
        return self.image_processor.post_process_image_guided_detection(*A , **A )

    def A__ ( self , *A , **A ) ->Optional[int]:
        # Thin delegation to the tokenizer.
        return self.tokenizer.batch_decode(*A , **A )

    def A__ ( self , *A , **A ) ->Dict:
        return self.tokenizer.decode(*A , **A )

    @property
    def A__ ( self ) ->Dict:
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , A , )
        return self.image_processor_class

    @property
    def A__ ( self ) ->Dict:
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , A , )
        return self.image_processor
return self.image_processor
| 433 | 1 |
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq, size):
    """Yield successive `size`-tuples from `seq`; the final chunk may be shorter.

    The original declared two parameters with the same name (SyntaxError) and was
    named ``__magic_name__`` while the cipher functions below call ``chunker``.
    """
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk
def prepare_input(dirty):
    """Normalise plaintext for Playfair: keep ASCII letters, uppercase, insert 'X'
    between doubled letters, and pad to even length with a trailing 'X'.

    The original body referenced the pre-obfuscation name ``dirty`` while the
    parameter had been renamed -- a NameError; canonical names restored.
    """
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""
    if len(dirty) < 2:
        return dirty
    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            # Playfair digraphs cannot contain a doubled letter; split with 'X'.
            clean += "X"
    clean += dirty[-1]
    if len(clean) & 1:
        clean += "X"
    return clean
def generate_table(key):
    """Build the 5x5 Playfair table from `key` as a flat list of 25 letters.

    'J' is omitted (I and J share a cell in Playfair). Key letters come first,
    deduplicated, followed by the remaining alphabet. The original appended the
    whole key instead of each character -- canonical behaviour restored.
    """
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)
    return table
def encode(plaintext, key):
    """Encrypt `plaintext` with the Playfair cipher under `key`.

    Relies on the module-level helpers ``generate_table``, ``prepare_input`` and
    ``chunker``. The original collapsed the two digraph characters and both
    row/column pairs into single names, destroying the algorithm; restored per the
    standard Playfair rules (same row -> shift right, same column -> shift down,
    rectangle -> swap columns).
    """
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]
    return ciphertext
def decode(ciphertext, key):
    """Decrypt Playfair `ciphertext` under `key` (inverse shifts of ``encode``).

    Relies on the module-level helpers ``generate_table`` and ``chunker``. The
    original collapsed the digraph characters and row/column pairs into single
    names, destroying the algorithm; restored (same row -> shift left, same
    column -> shift up, rectangle -> swap columns).
    """
    table = generate_table(key)
    plaintext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]
    return plaintext
| 250 |
from __future__ import annotations
def comp_and_swap(array, index1, index2, direction):
    """Swap ``array[index1]`` and ``array[index2]`` in place when they violate
    `direction` (1 = ascending, 0 = descending).

    The original declared four parameters with the same name (SyntaxError) and
    was named ``__magic_name__`` while ``bitonic_merge`` calls ``comp_and_swap``.
    """
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]
def bitonic_merge(array, low, length, direction):
    """Merge the bitonic sequence ``array[low:low+length]`` in place into sorted order
    (`direction`: 1 ascending, 0 descending). `length` must be a power of two.

    The original declared duplicate parameter names (SyntaxError) and referenced
    undefined locals; canonical bitonic merge restored.
    """
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)
def bitonic_sort(array, low, length, direction):
    """Bitonic-sort ``array[low:low+length]`` in place (`direction`: 1 ascending,
    0 descending). `length` must be a power of two.

    Sorts the two halves in opposite directions to form a bitonic sequence, then
    merges. The original declared duplicate parameter names (SyntaxError);
    canonical structure restored to match the call sites below.
    """
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
if __name__ == "__main__":
    # Demo driver: read a comma-separated list, sort ascending, then re-merge the
    # (now monotonic, hence bitonic) array descending. Input length must be a
    # power of two for bitonic sort. The original assigned both values to
    # `__magic_name__` while reading `user_input`/`unsorted` -- NameError; fixed.
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print("\nSorted array in ascending order is: ", end="")
    print(*unsorted, sep=", ")
    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print("Sorted array in descending order is: ", end="")
    print(*unsorted, sep=", ")
| 250 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for the X-CLIP model family. The original rebound a single
# name for both the structure dict and the torch-only list (overwriting the dict)
# and then passed an undefined `_import_structure` to _LazyModule; restored.
_import_structure = {
    "configuration_x_clip": [
        "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XCLIPConfig",
        "XCLIPTextConfig",
        "XCLIPVisionConfig",
    ],
    "processing_x_clip": ["XCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling code is only importable when torch is installed.
    _import_structure["modeling_x_clip"] = [
        "XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XCLIPModel",
        "XCLIPPreTrainedModel",
        "XCLIPTextModel",
        "XCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_x_clip import (
        XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XCLIPConfig,
        XCLIPTextConfig,
        XCLIPVisionConfig,
    )
    from .processing_x_clip import XCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_x_clip import (
            XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            XCLIPModel,
            XCLIPPreTrainedModel,
            XCLIPTextModel,
            XCLIPVisionModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules are loaded on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 435 |
'''simple docstring'''
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase_ ( lowerCamelCase_ ):
    """Unconditional image-generation pipeline: draw Gaussian noise and iteratively
    denoise it with `unet` under `scheduler` for `num_inference_steps` steps.

    NOTE(review): `__call__`'s parameters were mangled (all named ``lowerCamelCase``,
    not valid Python), and both return paths append the literal string
    "This is a local test" -- this looks like leftover test instrumentation;
    confirm before release.
    """
    def __init__( self , lowerCamelCase , lowerCamelCase ) -> Optional[int]:
        """Register the denoising `unet` and the noise `scheduler` with the pipeline."""
        super().__init__()
        self.register_modules(unet=lowerCamelCase , scheduler=lowerCamelCase )

    @torch.no_grad()
    def __call__( self , lowerCamelCase = 1 , lowerCamelCase = None , lowerCamelCase = 50 , lowerCamelCase = "pil" , lowerCamelCase = True , **lowerCamelCase , ) -> Union[ImagePipelineOutput, Tuple]:
        """Generate a batch of images from pure noise."""
        # Start from Gaussian noise shaped like the UNet's expected sample.
        UpperCamelCase : Dict = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=lowerCamelCase , )
        UpperCamelCase : Optional[int] = image.to(self.device )
        # set step values
        self.scheduler.set_timesteps(lowerCamelCase )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            UpperCamelCase : Dict = self.unet(lowerCamelCase , lowerCamelCase ).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            UpperCamelCase : Any = self.scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase ).prev_sample
        # Map samples from [-1, 1] to [0, 1] and move channels last for numpy/PIL output.
        UpperCamelCase : str = (image / 2 + 0.5).clamp(0 , 1 )
        UpperCamelCase : str = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            UpperCamelCase : List[Any] = self.numpy_to_pil(lowerCamelCase )
        if not return_dict:
            return (image,), "This is a local test"
        return ImagePipelineOutput(images=lowerCamelCase ), "This is a local test"
| 435 | 1 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
# Emit a standard deprecation warning at import time: this module only re-exports
# FlaxStableDiffusionControlNetPipeline from its new location under ..controlnet.
deprecate(
    """stable diffusion controlnet""",
    """0.22.0""",
    """Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.""",
    standard_warn=False,
    stacklevel=3,
)
| 658 |
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
# The feature-extractor class below calls `logger.warning`, so this must be bound
# to the name `logger` (the mangled name left `logger` undefined).
logger = logging.get_logger(__name__)
class __lowerCAmelCase ( _UpperCamelCase ):
    """SpeechT5 feature extractor: waveform normalisation on the input side and
    log-mel spectrogram extraction on the target side.

    NOTE(review): method parameter lists in this block were mangled (several
    parameters per method all named ``snake_case``) and are not valid Python as
    written; distinct parameter names must be restored before use.
    """
    # Model input names expected by SpeechT5.
    _UpperCamelCase : Tuple = ["""input_values""", """attention_mask"""]

    def __init__( self , snake_case = 1 , snake_case = 16_000 , snake_case = 0.0 , snake_case = False , snake_case = 80 , snake_case = 16 , snake_case = 64 , snake_case = "hann_window" , snake_case = 1.0 , snake_case = 80 , snake_case = 7_600 , snake_case = 1E-10 , snake_case = 2 , snake_case = True , **snake_case , ) -> Dict:
        """Precompute window, FFT length and mel filter bank from the config values."""
        super().__init__(feature_size=snake_case , sampling_rate=snake_case , padding_value=snake_case , **snake_case )
        a__ : Any = do_normalize
        a__ : List[str] = return_attention_mask
        a__ : List[Any] = num_mel_bins
        a__ : List[str] = hop_length
        a__ : int = win_length
        a__ : List[Any] = win_function
        a__ : List[str] = frame_signal_scale
        a__ : List[Any] = fmin
        a__ : Optional[Any] = fmax
        a__ : Union[str, Any] = mel_floor
        a__ : Union[str, Any] = reduction_factor
        # Convert window/hop lengths from milliseconds to samples.
        a__ : List[str] = win_length * sampling_rate // 1_000
        a__ : List[Any] = hop_length * sampling_rate // 1_000
        a__ : List[Any] = optimal_fft_length(self.sample_size )
        a__ : Dict = (self.n_fft // 2) + 1
        a__ : str = window_function(window_length=self.sample_size , name=self.win_function , periodic=snake_case )
        a__ : Tuple = mel_filter_bank(
            num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm="slaney" , mel_scale="slaney" , )
        # Both arguments below are deprecated; warn when a non-default value is passed.
        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers" , snake_case , )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers" , snake_case , )
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def _snake_case ( snake_case , snake_case , snake_case = 0.0 ) -> List[np.ndarray]:
        """Zero-mean / unit-variance normalise each utterance.

        With an attention mask, statistics use only the unpadded prefix of each
        vector and the padded tail is overwritten with the padding value.
        """
        if attention_mask is not None:
            a__ : Tuple = np.array(snake_case , np.intaa )
            a__ : List[str] = []
            for vector, length in zip(snake_case , attention_mask.sum(-1 ) ):
                # The 1e-7 epsilon guards against division by zero on silent segments.
                a__ : List[Any] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
                if length < normed_slice.shape[0]:
                    a__ : List[str] = padding_value
                normed_input_values.append(snake_case )
        else:
            a__ : Any = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
        return normed_input_values
    def _snake_case ( self , snake_case , ) -> np.ndarray:
        """Compute a log10-mel spectrogram for one waveform; returned transposed so the
        layout is (frames, mel bins)."""
        a__ : str = spectrogram(
            snake_case , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel="log10" , )
        return log_mel_spec.T
def __call__(
    self,
    audio=None,
    audio_target=None,
    padding=False,
    max_length=None,
    truncation=False,
    pad_to_multiple_of=None,
    return_attention_mask=None,
    return_tensors=None,
    sampling_rate=None,
    **kwargs,
) -> BatchFeature:
    """Featurize raw input `audio` and/or log-mel `audio_target` sequences.

    At least one of `audio` / `audio_target` must be provided.  When both are
    given, the target features are attached to the returned inputs under
    `labels` (and `decoder_attention_mask` if available).  `sampling_rate`
    is validated against the extractor's configured rate when passed.
    """
    if audio is None and audio_target is None:
        raise ValueError("You must provide either `audio` or `audio_target` values.")

    if sampling_rate is not None:
        if sampling_rate != self.sampling_rate:
            raise ValueError(
                f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
                f""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"""
                f""" {self.sampling_rate} and not {sampling_rate}.""")
    else:
        logger.warning(
            "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
            "Failing to do so can result in silent errors that might be hard to debug.")

    if audio is not None:
        inputs = self._process_audio(
            audio,
            False,  # is_target
            padding,
            max_length,
            truncation,
            pad_to_multiple_of,
            return_attention_mask,
            return_tensors,
            **kwargs,
        )
    else:
        inputs = None

    if audio_target is not None:
        inputs_target = self._process_audio(
            audio_target,
            True,  # is_target: extract log-mel features instead of raw values
            padding,
            max_length,
            truncation,
            pad_to_multiple_of,
            return_attention_mask,
            return_tensors,
            **kwargs,
        )

        if inputs is None:
            return inputs_target
        else:
            # Merge target features into the main inputs as labels.
            inputs["labels"] = inputs_target["input_values"]
            decoder_attention_mask = inputs_target.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

    return inputs
def _process_audio(
    self,
    speech,
    is_target=False,
    padding=False,
    max_length=None,
    truncation=False,
    pad_to_multiple_of=None,
    return_attention_mask=None,
    return_tensors=None,
    **kwargs,
) -> BatchFeature:
    """Convert one or more waveforms into padded model inputs.

    When `is_target` is True the waveforms are converted to log-mel features
    first; otherwise raw values are padded (and optionally zero-mean /
    unit-variance normalized).  Always operates on a batch internally.
    """
    is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
    if is_batched_numpy and len(speech.shape) > 2:
        raise ValueError(f"""Only mono-channel audio is supported for input to {self}""")
    is_batched = is_batched_numpy or (
        isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
    )

    if is_batched:
        speech = [np.asarray(one_waveform, dtype=np.float32) for one_waveform in speech]
    elif not is_batched and not isinstance(speech, np.ndarray):
        speech = np.asarray(speech, dtype=np.float32)
    elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
        speech = speech.astype(np.float32)

    # always return batch
    if not is_batched:
        speech = [speech]

    # needed to make pad() work on spectrogram inputs
    feature_size_hack = self.feature_size

    # convert into correct format for padding
    if is_target:
        features = [self._extract_mel_features(waveform) for waveform in speech]
        encoded_inputs = BatchFeature({"input_values": features})
        # pad() reads self.feature_size; spectrograms have num_mel_bins channels.
        self.feature_size = self.num_mel_bins
    else:
        encoded_inputs = BatchFeature({"input_values": speech})

    padded_inputs = self.pad(
        encoded_inputs,
        padding=padding,
        max_length=max_length,
        truncation=truncation,
        pad_to_multiple_of=pad_to_multiple_of,
        return_attention_mask=return_attention_mask,
        **kwargs,
    )
    # Restore the original feature size after padding.
    self.feature_size = feature_size_hack

    # convert input values to correct format
    input_values = padded_inputs["input_values"]
    if not isinstance(input_values[0], np.ndarray):
        padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
    elif (
        not isinstance(input_values, np.ndarray)
        and isinstance(input_values[0], np.ndarray)
        and input_values[0].dtype is np.dtype(np.float64)
    ):
        padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
    elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
        padded_inputs["input_values"] = input_values.astype(np.float32)

    # convert attention_mask to correct format
    attention_mask = padded_inputs.get("attention_mask")
    if attention_mask is not None:
        padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

    # zero-mean and unit-variance normalization (raw audio only)
    if not is_target and self.do_normalize:
        attention_mask = (
            attention_mask
            if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
            else None
        )
        padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
            padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value)

    if return_tensors is not None:
        padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

    return padded_inputs
def to_dict(self) -> Dict[str, Any]:
    """Serialize this feature extractor to a dictionary.

    Overrides the base implementation to drop attributes that are derived
    from the configured properties and therefore should not be persisted.
    """
    output = super().to_dict()

    # Don't serialize these as they are derived from the other properties.
    names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
    for name in names:
        if name in output:
            del output[name]

    return output
| 112 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    """Fixture that holds the configuration used by the BeitImageProcessor tests
    and produces the kwargs dict the processor is instantiated from."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
        """Return the kwargs used to build the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }
def prepare_semantic_single_inputs():
    """Load one (image, segmentation map) pair from the ADE20k test fixtures."""
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image = Image.open(dataset[0]["file"])
    seg_map = Image.open(dataset[1]["file"])

    return image, seg_map
def prepare_semantic_batch_inputs():
    """Load two (image, segmentation map) pairs from the ADE20k test fixtures,
    returned as ([image, image], [map, map]) for batched processor calls."""
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    imagea = Image.open(ds[0]["file"])
    mapa = Image.open(ds[1]["file"])
    imageb = Image.open(ds[2]["file"])
    mapb = Image.open(ds[3]["file"])

    return [imagea, imageb], [mapa, mapb]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for BeitImageProcessor: attribute presence, dict round-trips with
    kwarg overrides, PIL/numpy/torch batching, and segmentation-map handling
    (including the `do_reduce_labels` post-processing)."""

    # Read by the saving-test mixin to know which processor class to exercise.
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        # Keyword overrides (including the legacy `reduce_labels` alias) must win.
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_segmentation_maps(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                2,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

    def test_reduce_labels(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, seg_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, seg_map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        # With reduce_labels the background (0) becomes 255 and classes shift down by 1.
        image_processing.do_reduce_labels = True
        encoding = image_processing(image, seg_map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
| 352 |
'''simple docstring'''
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list, height: float) -> int:
    """Return the optimal leaf value of a complete binary game tree.

    `scores` holds the leaf values (left to right), `height` is the tree
    depth at which leaves live, and `is_max` selects whether the current
    player maximizes or minimizes.  Players alternate on each level.

    Raises:
        ValueError: if `depth` is negative or `scores` is empty.
    """
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    # Leaf reached: return its score directly.
    if depth == height:
        return scores[node_index]

    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )
def main():
    """Demo: print the minimax-optimal value for a fixed 8-leaf game tree."""
    scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
    height = math.log(len(scores), 2)
    print(f"""Optimal value : {minimax(0, 0, True, scores, height)}""")
if __name__ == "__main__":
    # Run the module's doctests first, then the demo entry point.
    import doctest

    doctest.testmod()
    main()
| 352 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    """Configuration for VisualBERT models.

    Holds the standard BERT-style hyperparameters plus VisualBERT-specific
    options: `visual_embedding_dim` (size of the projected visual features),
    `bypass_transformer` (run visual features through a separate shallow
    stack) and `special_visual_initialize` (initialize visual token type /
    position embeddings from the text ones).
    """

    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
| 685 |
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    """Build a UniSpeechSat sequence-classification model and copy the S3PRL
    downstream head weights (projector + post-net linear) into it."""
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    """Build a UniSpeechSat audio-frame-classification (diarization) model and
    copy the S3PRL downstream linear classifier into it."""
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    """Build a UniSpeechSat x-vector model and copy the S3PRL downstream
    weights (connector, TDNN kernels, utterance-level linears, objective)."""
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    # One TDNN layer per configured kernel size.
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_saprl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """Convert an S3PRL UniSpeechSat checkpoint into a HuggingFace model.

    Dispatches on the architecture declared in the config, copies the
    downstream head weights, and saves both the feature extractor and the
    model to `model_dump_path`.
    """
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    downstream_dict = checkpoint["Downstream"]

    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False)

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"""S3PRL weights conversion is not supported for {arch}""")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    # CLI entry point: parse checkpoint/config paths and run the conversion.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 685 | 1 |
"""simple docstring"""
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(_: Any):
    """argparse factory: build the `env` command (used via `set_defaults(func=...)`)."""
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    """`diffusers-cli env`: collect and print environment information
    (library versions, platform, CUDA availability) for bug reports."""

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register the `env` sub-command on the top-level CLI parser."""
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        """Gather version info, print it in copy-pasteable form, and return it."""
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d: Dict):
        """Render a dict as a markdown-style bullet list, one `- key: value` per line."""
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| 317 |
"""simple docstring"""
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
# Number of transformer blocks for each released RWKV checkpoint size.
NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}

# Hidden dimension for each released RWKV checkpoint size.
# NOTE: the "HIDEN" spelling is kept because the conversion function below
# reads the mapping under exactly this name.
HIDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1_024,
    "1B5": 2_048,
    "3B": 2_560,
    "7B": 4_096,
    "14B": 5_120,
}
def convert_state_dict(state_dict):
    """Rename RWKV checkpoint keys in place to the HuggingFace naming scheme.

    Mapping: `emb.` -> `embeddings.`, block 0's `ln0` -> `pre_ln`,
    `att` -> `attention`, `ffn` -> `feed_forward`, and the `time_mix_{k,v,r}`
    abbreviations to their full names.  Every key except `head.weight` is
    prefixed with `rwkv.`.  Returns the (mutated) dict.
    """
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key and reshape
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value and reshape
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_key and reshape
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")

        if name != "head.weight":
            name = "rwkv." + name

        state_dict[name] = weight
    return state_dict
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    """Download an RWKV checkpoint, convert it to HF format and save it sharded.

    Steps: build/save the tokenizer, infer the model size (from the checkpoint
    name if not given) and save the config, convert the state dict, write it
    in shards (with an index when sharded), re-save each shard as contiguous
    CPU tensors, and optionally push the result to the Hub.
    """
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50_277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
    print(
        "Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model.")
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    # CLI entry point: parse the conversion arguments and run the converter.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
    )
    parser.add_argument(
        "--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
    )
    parser.add_argument(
        "--tokenizer_file",
        default=None,
        type=str,
        help="Path to the tokenizer file to use (if not provided, only the model is converted).",
    )
    parser.add_argument(
        "--size",
        default=None,
        type=str,
        help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Push to the Hub the converted model.",
    )
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="Name of the pushed model on the Hub, including the username / organization.",
    )

    args = parser.parse_args()
    convert_rmkv_checkpoint_to_hf_format(
        args.repo_id,
        args.checkpoint_file,
        args.output_dir,
        size=args.size,
        tokenizer_file=args.tokenizer_file,
        push_to_hub=args.push_to_hub,
        model_name=args.model_name,
    )
| 317 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_UpperCAmelCase : Any = logging.get_logger(__name__)
class lowercase ( lowercase_ ):
__SCREAMING_SNAKE_CASE : Dict = ['''pixel_values''']
def __init__(
    self,
    do_resize: bool = True,
    size: Dict[str, int] = None,
    crop_pct: float = None,
    resample: PILImageResampling = PILImageResampling.BILINEAR,
    do_rescale: bool = True,
    rescale_factor: Union[int, float] = 1 / 255,
    do_normalize: bool = True,
    image_mean: Optional[Union[float, List[float]]] = None,
    image_std: Optional[Union[float, List[float]]] = None,
    **kwargs,
):
    """Configure the image processor.

    `size` defaults to a shortest edge of 384; `crop_pct` (only used when
    the shortest edge is below 384) defaults to 224/256; normalization
    statistics default to the standard ImageNet mean/std.
    """
    super().__init__(**kwargs)
    size = size if size is not None else {"shortest_edge": 384}
    size = get_size_dict(size, default_to_square=False)

    self.do_resize = do_resize
    self.size = size
    # Default value set here for backwards compatibility where the value in config is None
    self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
    self.resample = resample
    self.do_rescale = do_rescale
    self.rescale_factor = rescale_factor
    self.do_normalize = do_normalize
    self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
    self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
def resize(
    self,
    image: np.ndarray,
    size: Dict[str, int],
    crop_pct: float,
    resample: PILImageResampling = PILImageResampling.BICUBIC,
    data_format: Optional[Union[str, ChannelDimension]] = None,
    **kwargs,
) -> np.ndarray:
    """Resize `image` to `size["shortest_edge"]`.

    Below 384 the image is resized keeping aspect ratio to
    `shortest_edge / crop_pct` and then center-cropped; at 384 or larger it
    is warped directly to a square of that edge (no cropping).
    """
    size = get_size_dict(size, default_to_square=False)
    if "shortest_edge" not in size:
        raise ValueError(f"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""")
    shortest_edge = size["shortest_edge"]

    if shortest_edge < 384:
        # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
        resize_shortest_edge = int(shortest_edge / crop_pct)
        resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
        image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
        # then crop to (shortest_edge, shortest_edge)
        return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
    else:
        # warping (no cropping) when evaluated at 384 or larger
        return resize(
            image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs)
def a ( self , snake_case , snake_case , snake_case = None , **snake_case , ):
return rescale(snake_case , scale=snake_case , data_format=snake_case , **snake_case )
def a ( self , snake_case , snake_case , snake_case , snake_case = None , **snake_case , ):
return normalize(snake_case , mean=snake_case , std=snake_case , data_format=snake_case , **snake_case )
def a ( self , snake_case , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = ChannelDimension.FIRST , **snake_case , ):
snake_case_ = do_resize if do_resize is not None else self.do_resize
snake_case_ = crop_pct if crop_pct is not None else self.crop_pct
snake_case_ = resample if resample is not None else self.resample
snake_case_ = do_rescale if do_rescale is not None else self.do_rescale
snake_case_ = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case_ = do_normalize if do_normalize is not None else self.do_normalize
snake_case_ = image_mean if image_mean is not None else self.image_mean
snake_case_ = image_std if image_std is not None else self.image_std
snake_case_ = size if size is not None else self.size
snake_case_ = get_size_dict(snake_case , default_to_square=snake_case )
snake_case_ = make_list_of_images(snake_case )
if not valid_images(snake_case ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError('crop_pct must be specified if size < 384.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
snake_case_ = [to_numpy_array(snake_case ) for image in images]
if do_resize:
snake_case_ = [self.resize(image=snake_case , size=snake_case , crop_pct=snake_case , resample=snake_case ) for image in images]
if do_rescale:
snake_case_ = [self.rescale(image=snake_case , scale=snake_case ) for image in images]
if do_normalize:
snake_case_ = [self.normalize(image=snake_case , mean=snake_case , std=snake_case ) for image in images]
snake_case_ = [to_channel_dimension_format(snake_case , snake_case ) for image in images]
snake_case_ = {'pixel_values': images}
return BatchFeature(data=snake_case , tensor_type=snake_case )
| 362 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Names exposed lazily by this package; torch-only symbols are appended below.
_import_structure = {
    "configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only importable when torch is installed.
    _import_structure["modeling_lilt"] = [
        "LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LiltForQuestionAnswering",
        "LiltForSequenceClassification",
        "LiltForTokenClassification",
        "LiltModel",
        "LiltPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_lilt import (
            LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
            LiltForQuestionAnswering,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltModel,
            LiltPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 362 | 1 |
def is_pentagonal(n: int) -> bool:
    """Return True if `n` is a pentagonal number (inverse of P(k) = k(3k-1)/2)."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5_000) -> int:
    """
    Project Euler 44: find pentagonal numbers P_i and P_j whose sum and
    difference are both pentagonal; return the (minimal) difference, or -1 if
    no such pair exists among the first `limit - 1` pentagonal numbers.
    """
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1


if __name__ == "__main__":
    print(f"{solution() = }")
| 326 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    """Return the total number of trainable (requires_grad) parameters in `model`."""
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """
    Build a ModelCheckpoint that keeps the top-3 models by validation `metric`.

    Raises:
        NotImplementedError: for metrics other than rouge2 / bleu / em.
    """
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    """Build an EarlyStopping callback on `val_<metric>` (min for losses, max otherwise)."""
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    """Logs learning rates / parameter counts and writes eval results & generations to disk."""

    def on_batch_end(self, trainer, pl_module):
        # Log the current learning rate of every optimizer param group.
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True):
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 326 | 1 |
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def gabor_filter_kernel(ksize, sigma, theta, lambd, psi, gamma):
    """
    Build a (ksize x ksize) Gabor filter kernel.

    :param ksize: kernel side length; bumped to the next odd number if even
    :param sigma: standard deviation of the Gaussian envelope
    :param theta: orientation of the filter, in degrees
    :param lambd: wavelength of the sinusoidal factor
    :param psi: phase offset of the sinusoid
    :param gamma: spatial aspect ratio (ellipticity of the envelope)
    """
    # Gabor kernels are centred on one pixel, so force an odd size.
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # rotate coordinates into the filter's frame
            _x = cos_theta * px + sin_theta * py
            _y = -sin_theta * px + cos_theta * py

            # fill kernel: Gaussian envelope modulated by a cosine carrier
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple kernels to detect edges at six orientations
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filteraD(gray, CV_8UC3, kernel)
    # normalize the accumulated response into the displayable 0-255 range
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
| 109 |
"""Project Euler 42: count the coded triangle words in words.txt."""
import os

# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution(words_path=None):
    """
    Count the words whose letter-value sum (A=1 .. Z=26) is a triangular number.

    Args:
        words_path: path to the word list; defaults to `words.txt` next to
            this script. The file is a single line of comma-separated,
            double-quoted words.
    """
    if words_path is None:
        script_dir = os.path.dirname(os.path.realpath(__file__))
        words_path = os.path.join(script_dir, "words.txt")
    with open(words_path) as f:
        words = f.readline()
    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    word_values = [sum(ord(x) - 64 for x in word) for word in words]
    return len([value for value in word_values if value in TRIANGULAR_NUMBERS])


# Backward-compatible alias for the previous name.
_A = solution

if __name__ == "__main__":
    print(solution())
| 474 | 0 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    """Builds tiny UperNet configs and random inputs for the unit tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) with random tensors."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_backbone_config(self):
        """Tiny ConvNeXt backbone config used under the UperNet head."""
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        """Forward pass must produce per-pixel logits of the expected shape."""
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for UperNet (semantic segmentation only)."""

    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Download the fixture ADE20k image used by the slow integration tests."""
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end checks against published UperNet checkpoints."""

    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
import enum
import shutil
import sys

# Width (columns) of the attached terminal, used to clear/draw full lines.
TERMINAL_WIDTH, _ = shutil.get_terminal_size()
# Maps a direction name to the final byte of the ANSI cursor-movement escape.
CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    """Vertical movement directions for the interactive cursor."""

    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    """Write `content` to stdout immediately (bypassing buffering)."""
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    """Write `content` wrapped in the ANSI color escape for `color`."""
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    """Return the cursor to the start of the current line."""
    forceWrite("\r")


def move_cursor(num_lines, direction):
    """Move the cursor `num_lines` rows in `direction` ("up"/"down"/"left"/"right")."""
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    """Blank out the current terminal line and return to its start."""
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    """Draw a horizontal rule across the full terminal width."""
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
| 379 | 1 |
from math import factorial


def combinations(n, k):
    """
    Return C(n, k), the number of ways to choose `k` items from `n`.

    Raises:
        ValueError: if k > n or k is negative.
    """
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))


if __name__ == "__main__":
    print(
        "The number of five-card hands possible from a standard",
        f"fifty-two card deck is: {combinations(52, 5)}\n",
    )

    print(
        "If a class of 40 students must be arranged into groups of",
        f"4 for group projects, there are {combinations(40, 4)} ways",
        "to arrange them.\n",
    )

    print(
        "If 10 teams are competing in a Formula One race, there",
        f"are {combinations(10, 3)} ways that first, second and",
        "third place can be awarded.",
    )
| 33 |
"""Project Euler 72: count reduced proper fractions with denominator <= limit.

The count equals the sum of Euler's totient phi(d) for d = 2..limit.
"""


def solution(limit: int = 1_000_000) -> int:
    """
    Return sum(phi(d) for d in 2..limit) using an exact integer totient sieve.

    The previous float-based sieve truncated the final sum with int(), which
    risks an off-by-one from accumulated rounding; the integer sieve is exact.
    """
    # phi[n] starts as n; for each prime p we apply phi *= (1 - 1/p) exactly
    # via phi[n] -= phi[n] // p.
    phi = list(range(limit + 1))
    for p in range(2, limit + 1):
        if phi[p] == p:  # p untouched so far => p is prime
            for n in range(p, limit + 1, p):
                phi[n] -= phi[n] // p
    return sum(phi[2:])


if __name__ == "__main__":
    print(f"{solution() = }")
| 536 | 0 |
import argparse
import os
import re

import packaging.version


# Root of the example scripts whose pinned version must track releases.
PATH_TO_EXAMPLES = "examples/"
# Per-target regex and replacement template used to rewrite the version string.
REPLACE_PATTERNS = {
    "examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
    "init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
    "setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","),
    "doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
# Files rewritten on every version bump, keyed by their pattern name.
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Rewrite the version string in `fname` using the regex registered under `pattern`."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the `check_min_version` pin in every example script."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version everywhere it is hard-coded (init, setup, examples)."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Point model-doc links in the README at the stable docs instead of `main`."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version from the package `__init__`."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Bump to the release version (interactively confirmed) and clean the README."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Bump to the next dev version and clean the README."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
'''simple docstring'''
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Map from checkpoint name to its hosted config file.
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

# Module logger used by the config class below.
logger = logging.get_logger(__name__)
class _snake_case ( _a ):
    """Configuration for a Mask2Former model.

    Holds the (nested) backbone configuration plus the transformer-decoder,
    pixel-decoder and loss hyper-parameters.  Reconstructed from the upstream
    ``Mask2FormerConfig``: the obfuscated original had duplicate parameter
    names and never assigned the instance attributes.
    """

    model_type = "mask2former"
    # Only Swin backbones are officially supported.
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1_024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2_048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12_544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ):
        """Build the config; defaults to a Swin-base backbone when none is given."""
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            # Re-hydrate a plain dict into the matching config class.
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. """
                f"""Supported model types: {",".join(self.backbones_supported)}""")

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        # The decoder depth doubles as the generic ``num_hidden_layers`` field.
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Instantiate from a pre-existing backbone configuration."""
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 465 | 0 |
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
__a : Tuple = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n"
__a : str = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n"
__a : Optional[Any] = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {\'pearson\': 1.0, \'spearmanr\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'cola\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n"
def simple_accuracy(preds, labels):
    """Fraction of predictions equal to the labels.

    Args:
        preds: numpy array of predicted values.
        labels: numpy array of gold values (same shape as ``preds``).

    Returns:
        float accuracy in [0, 1].
    """
    # Elementwise comparison relies on numpy broadcasting; plain lists would
    # not provide ``.mean()``.
    return float((preds == labels).mean())
def acc_and_fa(preds, labels):
    """Accuracy and F1 score for (binary) predictions.

    Args:
        preds: numpy array of predicted labels.
        labels: numpy array of gold labels.

    Returns:
        dict with keys ``"accuracy"`` and ``"f1"``.
    """
    # Accuracy is computed inline so this helper stands on its own.
    acc = float((preds == labels).mean())
    fa = float(fa_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": fa,
    }
def pearson_and_spearman(preds, labels):
    """Pearson and Spearman correlations between predictions and labels.

    Args:
        preds: sequence of predicted (regression) values.
        labels: sequence of gold values.

    Returns:
        dict with keys ``"pearson"`` and ``"spearmanr"``.
    """
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase ( datasets.Metric ):
    '''GLUE metric: dispatches to accuracy, F1, Pearson/Spearman or Matthews
    correlation depending on ``self.config_name``.

    NOTE(review): ``_DESCRIPTION`` / ``_KWARGS_DESCRIPTION`` / ``_CITATION``
    are not bound in this chunk (the module-level strings are all assigned to
    ``__a``), and the helpers ``simple_accuracy`` / ``acc_and_fa`` /
    ``pearson_and_spearman`` called below are defined above under an
    obfuscated name — confirm against the upstream metric script.
    '''
    def lowerCAmelCase_ ( self : int ):
        """Declare the metric features; reject unsupported GLUE config names."""
        if self.config_name not in [
            "sst2",
            "mnli",
            "mnli_mismatched",
            "mnli_matched",
            "cola",
            "stsb",
            "mrpc",
            "qqp",
            "qnli",
            "rte",
            "wnli",
            "hans",
        ]:
            raise KeyError(
                """You should supply a configuration name selected in """
                """[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
                """\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
        # stsb is a regression task, hence float32 predictions/references.
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    """predictions""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
                    """references""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
                } ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , )
    # NOTE(review): both parameters below share the name ``UpperCamelCase_`` —
    # a SyntaxError as written; upstream they are ``predictions`` and
    # ``references``.
    def lowerCAmelCase_ ( self : str , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Any ):
        """Compute the subset-specific score(s) for predictions vs references."""
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(UpperCamelCase_ , UpperCamelCase_ )}
        elif self.config_name == "stsb":
            return pearson_and_spearman(UpperCamelCase_ , UpperCamelCase_ )
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_fa(UpperCamelCase_ , UpperCamelCase_ )
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(UpperCamelCase_ , UpperCamelCase_ )}
        else:
            raise KeyError(
                """You should supply a configuration name selected in """
                """[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
                """\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
| 637 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
a_ = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Runs doctests over the library's modeling/tokenization/configuration files."""

    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        n_identifier: Union[List[str], None] = None,
        ignore_files: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ) -> None:
        """Run doctests for every file of ``directory`` matching the filters.

        Args:
            directory: directory to scan.
            identifier: keep only files whose name contains this substring.
            n_identifier: drop files containing this substring (or any of a list).
            ignore_files: file names to skip entirely.
            only_modules: if True, resolve each file as a ``transformers``
                attribute and run its doctest suite; otherwise run
                ``doctest.testfile`` on the file itself.
        """
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    # module logger is bound above as ``a_``
                    a_.info(f"""{module_identifier} is not a module.""")
            else:
                result = doctest.testfile(str(Path("..") / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_doctests(self) -> None:
        """Doctest all ``modeling_*`` files (CTRL files excluded)."""
        directory = Path("src/transformers")
        identifier = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(directory, identifier=identifier, ignore_files=ignore_files)

    def test_tokenization_doctests(self) -> None:
        """Doctest all ``tokenization_*`` files."""
        directory = Path("src/transformers")
        identifier = "tokenization"
        self.analyze_directory(directory, identifier=identifier)

    def test_configuration_doctests(self) -> None:
        """Doctest all ``configuration_*`` files."""
        directory = Path("src/transformers")
        identifier = "configuration"
        self.analyze_directory(directory, identifier=identifier)

    def test_remaining_files_doctests(self) -> None:
        """Doctest every source file that is not modeling/tokenization/configuration."""
        directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(directory, n_identifier=n_identifiers)

    def test_documentation_doctests(self) -> None:
        """Doctest the documentation sources directly as files."""
        directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(directory, ignore_files=ignore_files, only_modules=False)
| 685 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_A : List[str] = {
    """configuration_trajectory_transformer""": [
        """TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """TrajectoryTransformerConfig""",
    ],
}
# Canonical name expected by ``_LazyModule`` below; the original code rebound
# ``_A`` for every value and lost this dict.
_import_structure = _A

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch-only objects are registered under a new key instead of clobbering
    # the import-structure dict (the original assigned the list to ``_A``).
    _import_structure["modeling_trajectory_transformer"] = [
        """TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TrajectoryTransformerModel""",
        """TrajectoryTransformerPreTrainedModel""",
        """load_tf_weights_in_trajectory_transformer""",
    ]

if TYPE_CHECKING:
    from .configuration_trajectory_transformer import (
        TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TrajectoryTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trajectory_transformer import (
            TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TrajectoryTransformerModel,
            TrajectoryTransformerPreTrainedModel,
            load_tf_weights_in_trajectory_transformer,
        )

else:
    import sys

    # Install the lazy module so attribute access triggers on-demand imports
    # (standard Hugging Face pattern).
    _A : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
    sys.modules[__name__] = _A
| 714 |
"""simple docstring"""
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
    "kwargs, expected", [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        # One job per shard: each job receives exactly one index.  (The
        # original wrote ``range(__snake_case, i + 1)``, a NameError at
        # decorator-evaluation time.)
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ], )
def test_distribute_shards(kwargs, expected):
    """Each job gets a contiguous, near-even range of shard indices."""
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected", [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ], )
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    """Shard lists are split across jobs; non-list kwargs are replicated."""
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
    "gen_kwargs, expected", [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        # Two shard lists of different lengths are inconsistent.
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ], )
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    """Shard count is the length of the shard list(s); mismatches raise."""
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
| 518 | 0 |
"""simple docstring"""
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
lowerCamelCase = logging.get_logger(__name__)
def a__ ( default=None , metadata=None ):
    """dataclasses ``field`` whose default is the given list-like value.

    The default goes through a factory so the dataclass machinery accepts a
    mutable (list) default.  Upstream name: ``list_field``.
    """
    return field(default_factory=lambda: default , metadata=metadata )


# The benchmark-arguments dataclass below refers to ``list_field``.
list_field = a__
@dataclass
class lowercase__ :
    """Arguments controlling a (deprecated) Hugging Face benchmark run.

    NOTE(review): field names/defaults reconstructed to match the upstream
    ``BenchmarkArguments``; the obfuscated original bound every field to the
    same unannotated name with undefined default values.
    """

    models: List[str] = field(
        default_factory=list,
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )
    batch_sizes: List[int] = field(
        default_factory=lambda: [8],
        metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"},
    )
    sequence_lengths: List[int] = field(
        default_factory=lambda: [8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )
    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True,
        metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."},
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time() )}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time() )}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time() )}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time() )}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time() )}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time() )}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )

    def __post_init__(self):
        """Emit the deprecation warning upstream attaches to these utils."""
        warnings.warn(
            f"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"""
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        """Serialize this instance to a JSON string."""
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        """Validated list of model identifiers to benchmark (must be non-empty)."""
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']."
            )
        return self.models

    @property
    def do_multi_processing(self) -> bool:
        """Whether measurements should run in a separate process (never on TPU)."""
        if not self.multi_process:
            return False
        elif self.is_tpu:
            # module logger is bound above as ``lowerCamelCase``
            lowerCamelCase.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
| 82 |
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowerCamelCase = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class lowercase__ (ChunkPipeline):
    """Zero-shot object detection pipeline.

    Detects, in an image, the objects named by free-text ``candidate_labels``:
    each label is tokenized and scored against the image, and detections above
    a score threshold are returned as ``{"score", "label", "box"}`` dicts.

    NOTE(review): method and parameter names reconstructed from the upstream
    ``ZeroShotObjectDetectionPipeline``; the obfuscated original had duplicate
    parameter names and five methods sharing one name, which breaks the
    ``preprocess``/``_forward``/``postprocess`` pipeline protocol.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"""The {self.__class__} is only available in PyTorch.""")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(
        self,
        image: Union[str, "Image.Image", List[Dict[str, Any]]],
        candidate_labels: Union[str, List[str]] = None,
        **kwargs,
    ):
        """Run detection on one image (or a batch of ``{"image", "candidate_labels"}`` dicts)."""
        if "text_queries" in kwargs:
            # Legacy argument name.
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        """Route ``threshold`` and ``top_k`` to the postprocess step."""
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        """Yield one model input per candidate label (chunked pipeline)."""
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        # NOTE(review): original dtype was the obfuscated ``torch.intaa`` —
        # assumed to be int32.
        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)

        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        """Run the model on one (image, label) chunk, carrying the metadata through."""
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        """Merge per-label detections, sort by score and optionally truncate to ``top_k``."""
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        """Convert a ``(xmin, ymin, xmax, ymax)`` tensor into an int dict."""
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
| 82 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
# NOTE(review): this dict rebinds ``__a`` and clobbers the logger bound just
# above; upstream these are two distinct names (``logger`` and the
# LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP constant) — confirm.
__a = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class UpperCAmelCase_ ( __UpperCAmelCase ):
    """Configuration for a LUKE model (word embeddings plus a separate entity
    vocabulary with optional entity-aware attention).

    NOTE(review): parameter names and defaults reconstructed from the upstream
    ``LukeConfig``; the obfuscated original had duplicate parameter names and
    never assigned the instance attributes.
    """

    model_type = "luke"

    def __init__(
        self,
        vocab_size=50_267,
        entity_vocab_size=500_000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        """Construct the configuration, forwarding the special token ids to the base class."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        # Entity embeddings can be smaller than hidden_size and are projected up.
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 701 |
'''simple docstring'''
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("""repo_id""" , ["""canonical_dataset_name""", """org-name/dataset-name"""] )
@pytest.mark.parametrize("""path""" , ["""filename.csv""", """filename with blanks.csv"""] )
@pytest.mark.parametrize("""revision""" , [None, """v2"""] )
def test_hf_hub_url(repo_id, path, revision):
    """hf_hub_url URL-quotes the path and defaults the revision to ``main``."""
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
| 301 | 0 |
'''simple docstring'''
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class __SCREAMING_SNAKE_CASE (PipelineTool ):
    """Text-summarization tool wrapping a BART CNN/SamSum checkpoint.

    NOTE(review): attribute and method names reconstructed from the upstream
    summarization ``PipelineTool``; the obfuscated original bound every class
    attribute to one name and gave all three methods the same name, which the
    tool framework cannot dispatch.
    """

    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        """Tokenize the input text, truncating to the model's maximum length."""
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        """Generate the summary token ids for the encoded input."""
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        """Detokenize the generated ids into the summary string."""
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
| 692 |
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    """Build a ``ksize`` x ``ksize`` Gabor filter kernel.

    Args:
        ksize: kernel size; even values are bumped to the next odd number so
            the kernel has a well-defined centre pixel.
        sigma: standard deviation of the gaussian envelope.
        theta: orientation of the filter, in degrees.
        lambd: wavelength of the sinusoidal carrier.
        gamma: spatial aspect ratio of the envelope.
        psi: phase offset of the carrier.

    Returns:
        The kernel as a float32 numpy array.  (The demo at the bottom of the
        file calls this name, confirming it; the obfuscated original never
        wrote the computed value into the kernel and returned all zeros.)
    """
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # The rotation is the same for every pixel, so hoist it out of the loops.
    _theta = theta / 180 * np.pi
    cos_theta = np.cos(_theta)
    sin_theta = np.sin(_theta)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # rotate coordinates into the filter's frame
            _x = cos_theta * px + sin_theta * py
            _y = -sin_theta * px + cos_theta * py

            # gaussian envelope modulated by a cosine carrier
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # Demo: apply Gabor filters at six orientations and display the edges.
    # NOTE(review): every intermediate result below is bound to ``_snake_case``
    # while the following lines read ``img`` / ``gray`` / ``out`` /
    # ``kernel_aa`` — these names are never defined, so the demo raises
    # NameError as written.  The module imports (``cva``, ``filteraD``,
    # ``np.uinta``) also look like obfuscated ``cv2`` / ``filter2D`` /
    # ``np.uint8`` — confirm against the original script.
    # read original image
    _snake_case = imread('''../image_data/lena.jpg''')
    # turn image in gray scale value
    _snake_case = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple Kernel to detect edges
    _snake_case = np.zeros(gray.shape[:2])
    for theta in [0, 3_0, 6_0, 9_0, 1_2_0, 1_5_0]:
        _snake_case = gabor_filter_kernel(1_0, 8, theta, 1_0, 0, 0)
        out += filteraD(gray, CV_8UC3, kernel_aa)
    _snake_case = out / out.max() * 2_5_5
    _snake_case = out.astype(np.uinta)
    imshow('''Original''', gray)
    imshow('''Gabor filter with 20x20 mask and 6 directions''', out)
    waitKey(0)
| 580 | 0 |
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class A_ ( _snake_case ):
    """Kwargs-handler stub with one field of each supported default type.

    The original declared all three fields under one name, so only the last
    survived; the tests below construct ``MockClass(a=..., b=..., c=...)``.
    """

    a: int = 0
    b: bool = False
    c: float = 3.0


# Name used by the test case below.
MockClass = A_
class A_ ( unittest.TestCase ):
    """Checks that KwargsHandler dataclasses forward their values correctly."""

    def test_kwargs_handler(self):
        """``to_kwargs`` only reports fields that differ from their defaults."""
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        """GradScalerKwargs values must reach the torch GradScaler."""
        scaler_handler = GradScalerKwargs(init_scale=1_024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        # NOTE(review): the original printed the obfuscated ``use_fpaa`` —
        # assumed to be ``use_fp16``.
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2_000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        """Re-launch this file under torchrun so the ``__main__`` DDP checks run."""
        cmd = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    # Executed under torchrun by the multi-GPU test above: prepare a model with
    # custom DDP kwargs and verify they (and only they) were applied.
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 695 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
# Make torch ops deterministic so the hard-coded expected image slices below are reproducible.
enable_full_determinism()
class A_ ( unittest.TestCase ):
    """Fast CPU tests for the unconditional latent-diffusion (LDM) pipeline.

    Property names are restored from the references inside the test method
    (``self.dummy_uncond_unet`` / ``self.dummy_vq_model``), and the locals from
    the obfuscated single-name placeholders of the original.
    """

    @property
    def dummy_uncond_unet( self ):
        """A tiny randomly-initialised UNet so the test stays fast."""
        torch.manual_seed(0 )
        model = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
        return model

    @property
    def dummy_vq_model( self ):
        """A tiny VQ-VAE used as the pipeline's first-stage model."""
        torch.manual_seed(0 )
        model = VQModel(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , )
        return model

    @property
    def dummy_text_encoder( self ):
        """A tiny CLIP text encoder (not used by the unconditional test)."""
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        return CLIPTextModel(config )

    def test_inference_uncond( self ):
        """Pipeline output must match the reference slice and the tuple return."""
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet , vqvae=vae , scheduler=scheduler )
        ldm.to(torch_device )
        ldm.set_progress_bar_config(disable=None )

        generator = torch.manual_seed(0 )
        image = ldm(generator=generator , num_inference_steps=2 , output_type='numpy' ).images

        # Same seed, but ask for the tuple return instead of the output class.
        generator = torch.manual_seed(0 )
        image_from_tuple = ldm(generator=generator , num_inference_steps=2 , output_type='numpy' , return_dict=False )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
        # MPS needs a looser tolerance than CPU/CUDA.
        tolerance = 1E-2 if torch_device != 'mps' else 3E-2
        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class A_ ( unittest.TestCase ):
    """Slow integration test against the pretrained CompVis/ldm-celebahq-256 checkpoint."""

    def test_inference_uncond( self ):
        ldm = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256' )
        ldm.to(torch_device )
        ldm.set_progress_bar_config(disable=None )

        generator = torch.manual_seed(0 )
        image = ldm(generator=generator , num_inference_steps=5 , output_type='numpy' ).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.4_4975, 0.4_6825, 0.474, 0.4359, 0.4581, 0.4_5095, 0.4341, 0.4447] )
        tolerance = 1E-2 if torch_device != 'mps' else 3E-2
        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 695 | 1 |
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def UpperCAmelCase_ ( snake_case__ = "laptop" ) -> DataFrame:
    """Scrape the first Amazon.in search-results page for ``snake_case__``
    (the product search term) and return the listings as a pandas DataFrame.

    Columns: title, link, current price, rating, MRP and discount percentage.
    Listings missing a mandatory field are skipped; optional fields fall back
    to placeholder values.
    """
    product = snake_case__
    url = f'https://www.amazon.in/laptop/s?k={product}'
    # Pretend to be a desktop browser — Amazon serves a different page to bots.
    header = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36',
        'Accept-Language': 'en-US, en;q=0.5',
    }
    soup = BeautifulSoup(requests.get(url , headers=header ).text )
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            'Product Title',
            'Product Link',
            'Current Price of the product',
            'Product Rating',
            'MRP of the product',
            'Discount',
        ] )
    # Loop through each search-result entry and store it in the dataframe.
    for item, _ in zip_longest(
        soup.find_all(
            'div' , attrs={'class': 's-result-item', 'data-component-type': 's-search-result'} , ) , soup.find_all('div' , attrs={'class': 'a-row a-size-base a-color-base'} ) , ):
        try:
            # Title and link live in the result's <h2> heading.
            product_title = item.h2.text
            product_link = 'https://www.amazon.in/' + item.h2.a['href']
            product_price = item.find('span' , attrs={'class': 'a-offscreen'} ).text
            try:
                product_rating = item.find('span' , attrs={'class': 'a-icon-alt'} ).text
            except AttributeError:
                product_rating = 'Not available'
            try:
                product_mrp = (
                    '₹'
                    + item.find(
                        'span' , attrs={'class': 'a-price a-text-price'} ).text.split('₹' )[1]
                )
            except AttributeError:
                product_mrp = ''
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip('₹' ).replace(',' , '' ) )
                            - float(product_price.strip('₹' ).replace(',' , '' ) )
                        )
                        / float(product_mrp.strip('₹' ).replace(',' , '' ) )
                    )
                    * 100 )
            except ValueError:
                discount = float('nan' )
        except AttributeError:
            # A mandatory field (title/link/price) is missing — skip this entry.
            continue
        data_frame.loc[len(data_frame.index )] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
    # Make the index 1-based for nicer CSV output.
    data_frame.index += 1
    return data_frame
if __name__ == "__main__":
    # NOTE(review): the constant below is bound to `_lowerCAmelCase`, but the
    # next line reads `product`, and `get_amazon_product_data` is not the name
    # the scraper above is defined under (`UpperCAmelCase_`). Confirm the
    # intended names before running this as a script.
    _lowerCAmelCase : Dict = "headphones"
    get_amazon_product_data(product).to_csv(f"""Amazon Product Data for {product}.csv""")
| 193 |
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def _lowerCamelCase (__lowerCamelCase : str ) -> None:
    """Print the first-order entropy, the second-order entropy and their
    difference for the input text, each rounded to one decimal place.

    The original unpacked both count dicts into one obfuscated name, used the
    same loop variable for both characters of a pair, and called the
    non-existent ``math.loga`` (log2); all three are fixed here.
    """
    text = __lowerCamelCase
    # NOTE(review): `analyze_text` is the pair-counting helper defined below
    # under an obfuscated name — confirm the binding before running.
    single_char_strings , two_char_strings = analyze_text(text )
    my_alphas = list(" " + ascii_lowercase )
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values() )

    # one length string: H1 = -sum(p * log2(p)) over observed characters.
    my_fir_sum = 0
    for ch in my_alphas:
        if ch in single_char_strings:
            count = single_char_strings[ch]
            prob = count / all_sum
            my_fir_sum += prob * math.log2(prob )  # entropy formula.

    # print entropy
    print(f'''{round(-1 * my_fir_sum ):.1f}''' )

    # two len string: same formula over adjacent character pairs.
    all_sum = sum(two_char_strings.values() )
    my_sec_sum = 0
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                count = two_char_strings[sequence]
                prob = count / all_sum
                my_sec_sum += prob * math.log2(prob )

    # print second entropy
    print(f'''{round(-1 * my_sec_sum ):.1f}''' )

    # print the difference between them
    print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def _lowerCamelCase (__lowerCamelCase : str ) -> tuple[dict, dict]:
a__ = Counter() # type: ignore
a__ = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(__lowerCamelCase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def _lowerCamelCase () -> Any:
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 489 | 0 |
'''simple docstring'''
def a ( equation1 : list[int] , equation2 : list[int] ) -> tuple[float, float]:
    """Solve a 2x2 linear system with Cramer's rule.

    Each equation is ``[a, b, c]`` meaning ``a*x + b*y = c``. Returns the
    solution ``(x, y)``.

    The original declared the same name for both parameters (a SyntaxError in
    Python) and tested the first equation's coefficients twice in the
    degeneracy check; both are fixed here.

    Raises:
        ValueError: if an equation does not have exactly 3 coefficients, if
            all of a1, b1, a2, b2 are zero, or if the system has no solution
            or infinitely many solutions.

    >>> a([2, 3, 0], [5, 1, 0])
    (0.0, 0.0)
    >>> a([0, 4, 50], [2, 0, 26])
    (13.0, 12.5)
    """
    if not len(equation1 ) == len(equation2 ) == 3:
        raise ValueError('Please enter a valid equation.' )
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError('Both a & b of two equations can\'t be zero.' )

    # Extract the coefficients
    a1 , b1 , c1 = equation1
    a2 , b2 , c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError('Infinite solutions. (Consistent system)' )
        raise ValueError('No solution. (Inconsistent system)' )

    if determinant_x == determinant_y == 0:
        # Trivial solution: both equations pass through the origin.
        return (0.0, 0.0)

    x = determinant_x / determinant
    y = determinant_y / determinant
    # Non-Trivial Solution (Consistent system)
    return (x, y)
| 708 |
import os
def a ( ) -> Optional[int]:
"""simple docstring"""
with open(os.path.dirname(A__ ) + '/grid.txt' ) as f:
_lowercase =[] # noqa: E741
for _ in range(20 ):
l.append([int(A__ ) for x in f.readline().split()] )
_lowercase =0
# right
for i in range(20 ):
for j in range(17 ):
_lowercase =l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
_lowercase =temp
# down
for i in range(17 ):
for j in range(20 ):
_lowercase =l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
_lowercase =temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
_lowercase =l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
_lowercase =temp
# diagonal 2
for i in range(17 ):
for j in range(3 , 20 ):
_lowercase =l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
_lowercase =temp
return maximum
if __name__ == "__main__":
print(solution())
| 380 | 0 |
"""simple docstring"""
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
# Canonical filenames for the checkpoint/config artefacts read and written by
# the library.
# NOTE(review): every constant below is bound to the same obfuscated name
# `_lowercase`, so only the last assignment survives at runtime — upstream
# these are distinct names (WEIGHTS_NAME, CONFIG_NAME, SPIECE_UNDERLINE, ...).
# Also `FEATURE_EXTRACTOR_NAME` and `SENTENCEPIECE_UNDERLINE` are referenced
# but never defined or imported in this file — confirm before use.
_lowercase : Optional[Any] = 'pytorch_model.bin'
_lowercase : Dict = 'pytorch_model.bin.index.json'
_lowercase : Tuple = 'adapter_config.json'
_lowercase : Optional[int] = 'adapter_model.bin'
_lowercase : List[str] = 'adapter_model.safetensors'
_lowercase : Tuple = 'tf_model.h5'
_lowercase : Tuple = 'tf_model.h5.index.json'
_lowercase : List[Any] = 'model.ckpt'
_lowercase : Tuple = 'flax_model.msgpack'
_lowercase : Union[str, Any] = 'flax_model.msgpack.index.json'
_lowercase : Union[str, Any] = 'model.safetensors'
_lowercase : Optional[Any] = 'model.safetensors.index.json'
_lowercase : Optional[int] = 'config.json'
_lowercase : Union[str, Any] = 'preprocessor_config.json'
_lowercase : Union[str, Any] = FEATURE_EXTRACTOR_NAME
_lowercase : List[str] = 'generation_config.json'
_lowercase : Any = 'modelcard.json'
_lowercase : List[Any] = '▁'
_lowercase : Optional[int] = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility
_lowercase : Optional[int] = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
_lowercase : Dict = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
_lowercase : Any = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def lowercase__ ( snake_case_ : str ):
    """Raise ``ImportError`` unless the installed library version is at least
    ``snake_case_`` (the minimum required version string).

    The original compared ``version.parse(_A)`` against itself with ``_A``
    undefined and read an unbound ``min_version``; the comparison against the
    module-level ``__version__`` is restored here.
    """
    min_version = snake_case_
    if version.parse(__version__ ) < version.parse(min_version ):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = F'''This example requires a minimum version of {min_version},'''
        error_message += F''' but the version found is {__version__}.\n'''
        raise ImportError(
            error_message
            + '''Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other '''
            '''versions of HuggingFace Transformers.''' )
| 49 |
"""simple docstring"""
def A ( _A = 100 ):
    """Project Euler 29: count the distinct terms of ``a ** b`` for
    2 <= a <= ``_A`` and 2 <= b <= ``_A``.

    The original read an undefined ``n`` and looped over ``range(2, _A)``,
    excluding the upper bound itself; both are fixed here.

    >>> A(5)
    15
    """
    collect_powers = set()
    upper_limit = _A + 1  # range() excludes its end, so include _A itself
    for base in range(2, upper_limit ):
        for exponent in range(2, upper_limit ):
            collect_powers.add(base**exponent )  # the set deduplicates repeated powers
    return len(collect_powers )


if __name__ == "__main__":
    # The original called an undefined `solution`; the entry point is `A`.
    print('Number of terms ', A(int(str(input()).strip())))
| 584 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class a_ ( ProcessorMixin ):
    """CLIPSeg-style processor wrapping a ViT image processor and a CLIP
    tokenizer: prepares text prompts, visual prompts and/or images.

    The original bound the base class, the three ProcessorMixin class
    attributes and every parameter to obfuscated placeholder names (duplicate
    parameter names are a SyntaxError); the upstream names are restored here
    from the references inside the method bodies.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__( self , image_processor=None , tokenizer=None , **kwargs):
        """Both components are required; the deprecated ``feature_extractor``
        kwarg is accepted as an alias for ``image_processor``."""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''')
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''')

        super().__init__(image_processor , tokenizer)

    def __call__( self , text=None , visual_prompt=None , images=None , return_tensors=None , **kwargs):
        """Encode text, visual prompt and/or images for the model.

        At least one of the three inputs is required; ``text`` and
        ``visual_prompt`` are mutually exclusive.
        """
        if text is None and visual_prompt is None and images is None:
            raise ValueError('''You have to specify either text, visual prompt or images.''')
        if text is not None and visual_prompt is not None:
            raise ValueError('''You have to specify exactly one type of prompt. Either text or visual prompt.''')

        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs)
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt , return_tensors=return_tensors , **kwargs)
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                '''pixel_values''': image_features.pixel_values,
                '''conditional_pixel_values''': prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            # Attach the pixel values to the tokenizer output.
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                '''conditional_pixel_values''': prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features) , tensor_type=return_tensors)

    def batch_decode( self , *args , **kwargs):
        """Forward all arguments to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args , **kwargs)

    def decode( self , *args , **kwargs):
        """Forward all arguments to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args , **kwargs)

    @property
    def feature_extractor_class( self ):
        """Deprecated alias for ``image_processor_class``."""
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
        return self.image_processor_class

    @property
    def feature_extractor( self ):
        """Deprecated alias for ``image_processor``."""
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , FutureWarning , )
        return self.image_processor
| 561 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}
class a_ ( PretrainedConfig ):
    """Configuration for the InstructBLIP vision encoder (a ViT variant).

    The original bound the base class, the ``model_type`` class attribute and
    every __init__ parameter to obfuscated placeholder names (duplicate
    parameter names are a SyntaxError); the upstream names are restored here
    from the attribute assignments in the body.
    """

    model_type = "instructblip_vision_model"

    def __init__( self , hidden_size=1408 , intermediate_size=6144 , num_hidden_layers=39 , num_attention_heads=16 , image_size=224 , patch_size=14 , hidden_act="gelu" , layer_norm_eps=1E-6 , attention_dropout=0.0 , initializer_range=1E-1_0 , qkv_bias=True , **kwargs , ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs) -> "PretrainedConfig":
        """Load this sub-config, also accepting a full `instructblip` config."""
        cls._set_token_in_kwargs(kwargs)

        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs)

        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get('''model_type''') == "instructblip":
            config_dict = config_dict['''vision_config''']

        if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type:
            # NOTE(review): `logger` is the obfuscated `UpperCamelCase_` module
            # global above — confirm the binding before use.
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict , **kwargs)
class a_ ( PretrainedConfig ):
    """Configuration for the InstructBLIP Q-Former (a BERT-style model with
    periodic cross-attention into the vision encoder).

    Parameter names are restored from the attribute assignments in the body —
    the original declared every parameter under one duplicated placeholder
    name, which is a SyntaxError.
    """

    model_type = "instructblip_qformer"

    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , initializer_range=0.02 , layer_norm_eps=1E-1_2 , pad_token_id=0 , position_embedding_type="absolute" , cross_attention_frequency=2 , encoder_hidden_size=1408 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs) -> "PretrainedConfig":
        """Load this sub-config, also accepting a full `instructblip` config."""
        cls._set_token_in_kwargs(kwargs)

        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs)

        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get('''model_type''') == "instructblip":
            config_dict = config_dict['''qformer_config''']

        if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type:
            # NOTE(review): `logger` is the obfuscated `UpperCamelCase_` module
            # global above — confirm the binding before use.
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict , **kwargs)
class a_ ( PretrainedConfig ):
    """Composite InstructBLIP configuration: vision encoder + Q-Former +
    language model.

    NOTE(review): the two sub-config classes above are also defined under the
    obfuscated name `a_` in this file; the `InstructBlipVisionConfig` /
    `InstructBlipQFormerConfig` references below keep the upstream names —
    confirm the bindings before use.
    """

    model_type = "instructblip"
    is_composition = True

    def __init__( self , vision_config=None , qformer_config=None , text_config=None , num_query_tokens=32 , **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info('''vision_config is None. initializing the InstructBlipVisionConfig with default values.''')

        if qformer_config is None:
            qformer_config = {}
            logger.info('''qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.''')

        if text_config is None:
            text_config = {}
            logger.info('''text_config is None. Initializing the text config with default values (`OPTConfig`).''')

        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config['''model_type'''] if '''model_type''' in text_config else '''opt'''
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        # The Q-Former cross-attends over the vision encoder's hidden states.
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs( cls , vision_config , qformer_config , text_config , **kwargs , ):
        """Build the composite config from the three sub-configs."""
        return cls(
            vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **kwargs , )

    def to_dict( self ):
        """Serialize, expanding each sub-config into a plain dict."""
        output = copy.deepcopy(self.__dict__)
        output['''vision_config'''] = self.vision_config.to_dict()
        output['''qformer_config'''] = self.qformer_config.to_dict()
        output['''text_config'''] = self.text_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
| 561 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.