| code (string, lengths 86-54.5k) | code_codestyle (int64, 0-371) | style_context (string, lengths 87-49.2k) | style_context_codestyle (int64, 0-349) | label (int64, 0-1) |
|---|---|---|---|---|
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file):
"""Convert a TensorFlow Transformer-XL checkpoint and/or its pickled corpus to PyTorch."""
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(transfo_xl_dataset_file, "rb") as fp:
corpus = pickle.load(fp, encoding="latin1")
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
print(f"Save vocabulary to {pytorch_vocab_dump_path}")
corpus_vocab_dict = corpus.vocab.__dict__
torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)
corpus_dict_no_vocab = corpus.__dict__
corpus_dict_no_vocab.pop("vocab", None)
pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
print(f"Save dataset to {pytorch_dataset_dump_path}")
torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
config_path = os.path.abspath(transfo_xl_config_file)
tf_path = os.path.abspath(tf_checkpoint_path)
print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
# Initialise PyTorch model
if transfo_xl_config_file == "":
config = TransfoXLConfig()
else:
config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
print(f"Building PyTorch model from configuration: {config}")
model = TransfoXLLMHeadModel(config)
model = load_tf_weights_in_transfo_xl(model, config, tf_path)
# Save the PyTorch model weights and configuration
pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
torch.save(model.state_dict(), pytorch_weights_dump_path)
print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
f.write(config.to_json_string())
if __name__ == "__main__":
snake_case_ : List[str] = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--tf_checkpoint_path",
default="",
type=str,
help="An optional path to a TensorFlow checkpoint path to be converted.",
)
parser.add_argument(
"--transfo_xl_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--transfo_xl_dataset_file",
default="",
type=str,
help="An optional dataset file to be converted in a vocabulary.",
)
snake_case_ : int = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 51 |
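The row above converts an original TensorFlow Transformer-XL checkpoint (and optionally its pickled corpus) into a PyTorch dump. As a quick sanity check of the script's output, the dump can be reloaded with `from_pretrained`; the paths below are placeholders, not values taken from the row.

```python
# Hypothetical invocation (all paths are illustrative):
#   python convert_transfo_xl_checkpoint.py \
#       --tf_checkpoint_path ./tf_ckpt/model.ckpt \
#       --pytorch_dump_folder_path ./pytorch_dump
from transformers import TransfoXLLMHeadModel

# The dump folder holds pytorch_model.bin (WEIGHTS_NAME) and config.json (CONFIG_NAME),
# which is the layout from_pretrained expects.
model = TransfoXLLMHeadModel.from_pretrained("./pytorch_dump")
print(sum(p.numel() for p in model.parameters()))
```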
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_mbart"] = ["MBartTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_mbart"] = [
"MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"MBartForCausalLM",
"MBartForConditionalGeneration",
"MBartForQuestionAnswering",
"MBartForSequenceClassification",
"MBartModel",
"MBartPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_mbart"] = [
"TFMBartForConditionalGeneration",
"TFMBartModel",
"TFMBartPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_mbart"] = [
"FlaxMBartForConditionalGeneration",
"FlaxMBartForQuestionAnswering",
"FlaxMBartForSequenceClassification",
"FlaxMBartModel",
"FlaxMBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 51 | 1 |
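The mBART `__init__` above defers all heavy imports through `_LazyModule`. A minimal sketch of that idea (my own simplification, not transformers' actual implementation) is a module subclass that resolves each exported symbol to its submodule on first attribute access:

```python
import importlib
import types


class LazyModule(types.ModuleType):
    """Minimal sketch: import a submodule only when one of its symbols is first accessed."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Invert {submodule: [symbols]} into {symbol: submodule}
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }
        self.__all__ = list(self._symbol_to_module)

    def __getattr__(self, symbol: str):
        if symbol not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {symbol!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._symbol_to_module[symbol]}")
        value = getattr(submodule, symbol)
        setattr(self, symbol, value)  # cache so __getattr__ fires only once per symbol
        return value
```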
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Tuple = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
model_type = "yolos"
def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=[512, 864], patch_size=16, num_channels=3, qkv_bias=True, num_detection_tokens=100, use_mid_position_embeddings=True, auxiliary_loss=False, class_cost=1, bbox_cost=5, giou_cost=2, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.qkv_bias = qkv_bias
self.num_detection_tokens = num_detection_tokens
self.use_mid_position_embeddings = use_mid_position_embeddings
self.auxiliary_loss = auxiliary_loss
# Hungarian matcher
self.class_cost = class_cost
self.bbox_cost = bbox_cost
self.giou_cost = giou_cost
# Loss coefficients
self.bbox_loss_coefficient = bbox_loss_coefficient
self.giou_loss_coefficient = giou_loss_coefficient
self.eos_coefficient = eos_coefficient
class YolosOnnxConfig(OnnxConfig):
torch_onnx_minimum_version = version.parse("1.11")
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict([("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"})])
@property
def atol_for_validation(self) -> float:
return 1e-4
@property
def default_onnx_opset(self) -> int:
return 12
| 62 |
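The two classes above are the YOLOS model configuration and its ONNX export configuration. A minimal construction sketch (the non-default values are arbitrary illustrations):

```python
from transformers import YolosConfig, YolosForObjectDetection

config = YolosConfig(num_detection_tokens=50, image_size=[512, 864])
model = YolosForObjectDetection(config)  # randomly initialised weights from the config
print(model.config.num_detection_tokens)  # 50
```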
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {'''vocab_file''': '''sentencepiece.bpe.model'''}
UpperCAmelCase_ : str = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
}
UpperCAmelCase_ : List[Any] = {
'''moussaKam/mbarthez''': 10_24,
'''moussaKam/barthez''': 10_24,
'''moussaKam/barthez-orangesum-title''': 10_24,
}
UpperCAmelCase_ : List[str] = '''▁'''
class BarthezTokenizer(PreTrainedTokenizer):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["input_ids", "attention_mask"]
def __init__( self : int , __lowerCamelCase : Dict , __lowerCamelCase : List[str]="<s>" , __lowerCamelCase : str="</s>" , __lowerCamelCase : List[str]="</s>" , __lowerCamelCase : Optional[int]="<s>" , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : Optional[Any]="<pad>" , __lowerCamelCase : str="<mask>" , __lowerCamelCase : Optional[Dict[str, Any]] = None , **__lowerCamelCase : Any , ):
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase :int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
UpperCamelCase :int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , )
UpperCamelCase :Union[str, Any] = vocab_file
UpperCamelCase :List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__lowerCamelCase ) )
UpperCamelCase :Optional[int] = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
UpperCamelCase :Tuple = len(self.sp_model ) - 1
UpperCamelCase :List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def _A ( self : Optional[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase :Any = [self.cls_token_id]
UpperCamelCase :Dict = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _A ( self : Dict , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
def _A ( self : int , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
UpperCamelCase :Any = [self.sep_token_id]
UpperCamelCase :int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _A ( self : List[Any] ):
return len(self.sp_model )
def _A ( self : Any ):
UpperCamelCase :Optional[int] = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _A ( self : int , __lowerCamelCase : str ):
return self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase )
def _A ( self : Dict , __lowerCamelCase : Optional[int] ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCamelCase :List[Any] = self.sp_model.PieceToId(__lowerCamelCase )
return spm_id if spm_id else self.unk_token_id
def _A ( self : Union[str, Any] , __lowerCamelCase : Union[str, Any] ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(__lowerCamelCase )
def _A ( self : Optional[int] , __lowerCamelCase : Union[str, Any] ):
UpperCamelCase :List[Any] = []
UpperCamelCase :str = """"""
UpperCamelCase :Optional[int] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__lowerCamelCase ) + token
UpperCamelCase :List[str] = True
UpperCamelCase :Dict = []
else:
current_sub_tokens.append(__lowerCamelCase )
UpperCamelCase :Optional[Any] = False
out_string += self.sp_model.decode(__lowerCamelCase )
return out_string.strip()
def __getstate__( self : str ):
UpperCamelCase :Tuple = self.__dict__.copy()
UpperCamelCase :str = None
return state
def __setstate__( self : Tuple , __lowerCamelCase : Optional[int] ):
UpperCamelCase :Any = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
UpperCamelCase :Any = {}
UpperCamelCase :str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _A ( self : str , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ):
if not os.path.isdir(__lowerCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCamelCase :Union[str, Any] = os.path.join(
__lowerCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCamelCase , """wb""" ) as fi:
UpperCamelCase :List[str] = self.sp_model.serialized_model_proto()
fi.write(__lowerCamelCase )
return (out_vocab_file,)
| 62 | 1 |
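The row above is the BARThez SentencePiece tokenizer. A short usage sketch of the special-token layout its `build_inputs_with_special_tokens` produces (downloads the public `moussaKam/barthez` vocabulary):

```python
from transformers import BarthezTokenizer

tokenizer = BarthezTokenizer.from_pretrained("moussaKam/barthez")
ids = tokenizer("Le soleil brille.").input_ids
# A single sequence is wrapped as <s> ... </s>
assert ids[0] == tokenizer.cls_token_id and ids[-1] == tokenizer.sep_token_id
```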
'''simple docstring'''
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def bamb(x):
"""Convert a number of bytes to megabytes."""
return int(x / 2**20)
class TorchTracemalloc:
"""Context manager that reports the CUDA memory allocated inside its block."""
def __enter__(self):
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
self.begin = torch.cuda.memory_allocated()
return self
def __exit__(self, *exc):
gc.collect()
torch.cuda.empty_cache()
self.end = torch.cuda.memory_allocated()
self.peak = torch.cuda.max_memory_allocated()
self.used = bamb(self.end - self.begin)
self.peaked = bamb(self.peak - self.begin)
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased", n_train=320, n_val=160):
"""Build train/eval dataloaders over a small slice of GLUE MRPC."""
tokenizer = AutoTokenizer.from_pretrained(model_name)
datasets = load_dataset("glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"})
def tokenize_function(examples):
# max_length=None => use the model max length (it's actually the default)
outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
tokenized_datasets = datasets.map(tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
def collate_fn(examples):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
return tokenizer.pad(examples, padding="longest", return_tensors="pt")
# Instantiate dataloaders.
train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
return train_dataloader, eval_dataloader
def training_function(config, args):
# Initialize accelerator
accelerator = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lr = config["lr"]
num_epochs = int(config["num_epochs"])
seed = int(config["seed"])
batch_size = int(config["batch_size"])
model_name = args.model_name_or_path
set_seed(seed)
train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)
# Instantiate the model (we build the model here so that the seed also controls new weights initialization)
model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)
# Instantiate optimizer
optimizer_cls = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
optimizer = optimizer_cls(params=model.parameters(), lr=lr)
if accelerator.state.deepspeed_plugin is not None:
gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config["gradient_accumulation_steps"]
else:
gradient_accumulation_steps = 1
max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lr_scheduler = get_linear_schedule_with_warmup(optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps)
else:
lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
)
# We need to keep track of how many total steps we have iterated over
overall_step = 0
# We also need to keep track of the starting epoch so files are named properly
starting_epoch = 0
# Now we train the model
train_total_peak_memory = {}
for epoch in range(starting_epoch, num_epochs):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(train_dataloader):
outputs = model(**batch)
loss = outputs.loss
loss = loss / gradient_accumulation_steps
accelerator.backward(loss)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin)))
accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
accelerator.print("Total Peak Memory consumed during the train (max): {}".format(tracemalloc.peaked + bamb(tracemalloc.begin)))
train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + bamb(tracemalloc.begin)
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
json.dump(train_total_peak_memory, f)
def main():
parser = argparse.ArgumentParser(description="Simple example of a training script tracking peak GPU memory usage.")
parser.add_argument("--model_name_or_path", type=str, default="bert-base-cased", help="Path to pretrained model or model identifier from huggingface.co/models.", required=False)
parser.add_argument("--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.")
parser.add_argument("--peak_memory_upper_bound", type=float, default=None, help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.")
parser.add_argument("--n_train", type=int, default=320, help="Number of training examples to use.")
parser.add_argument("--n_val", type=int, default=160, help="Number of validation examples to use.")
parser.add_argument("--num_epochs", type=int, default=1, help="Number of train epochs.")
args = parser.parse_args()
config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(config, args)
if __name__ == "__main__":
main()
| 104 |
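The `TorchTracemalloc` context manager from the script above is reusable on its own. An illustrative use (requires a CUDA device; the tensor sizes are arbitrary):

```python
import torch

with TorchTracemalloc() as tracemalloc:
    x = torch.randn(1024, 1024, device="cuda")  # ~4 MB in float32
    y = x @ x  # allocates a second buffer of the same size
print(f"delta used/peak {tracemalloc.used}/{tracemalloc.peaked} MB")
```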
'''simple docstring'''
import functools
def min_edit_distance(word1: str, word2: str) -> int:
"""Levenshtein distance between two words, via memoised recursion."""
len_word1 = len(word1)
len_word2 = len(word2)
@functools.cache
def min_distance(index1: int, index2: int) -> int:
# if first word index is overflow - delete all from the second word
if index1 >= len_word1:
return len_word2 - index2
# if second word index is overflow - delete all from the first word
if index2 >= len_word2:
return len_word1 - index1
diff = int(word1[index1] != word2[index2])  # current letters not identical
return min(1 + min_distance(index1 + 1, index2), 1 + min_distance(index1, index2 + 1), diff + min_distance(index1 + 1, index2 + 1))
return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 237 | 0 |
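A few checks for the memoised edit-distance function above, using the classic Levenshtein examples:

```python
assert min_edit_distance("intention", "execution") == 5
assert min_edit_distance("kitten", "sitting") == 3
assert min_edit_distance("", "abc") == 3  # pure insertions
```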
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
w, h = image.size
w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
image = np.array(image).astype(np.float32) / 255.0
image = image[None].transpose(0, 3, 1, 2)
image = torch.from_numpy(image)
return 2.0 * image - 1.0
class LDMSuperResolutionPipeline(DiffusionPipeline):
def __init__( self : str , lowercase_ : VQModel , lowercase_ : UNetaDModel , lowercase_ : Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] , ) -> List[Any]:
super().__init__()
self.register_modules(vqvae=lowercase_ , unet=lowercase_ , scheduler=lowercase_ )
@torch.no_grad()
def __call__( self : Tuple , lowercase_ : Union[torch.Tensor, PIL.Image.Image] = None , lowercase_ : Optional[int] = 1 , lowercase_ : Optional[int] = 1_00 , lowercase_ : Optional[float] = 0.0 , lowercase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase_ : Optional[str] = "pil" , lowercase_ : bool = True , ) -> Union[Tuple, ImagePipelineOutput]:
if isinstance(lowercase_ , PIL.Image.Image ):
lowercase__ : Any = 1
elif isinstance(lowercase_ , torch.Tensor ):
lowercase__ : List[Any] = image.shape[0]
else:
raise ValueError(F'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(lowercase_ )}''' )
if isinstance(lowercase_ , PIL.Image.Image ):
lowercase__ : str = preprocess(lowercase_ )
lowercase__ , lowercase__ : Union[str, Any] = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
lowercase__ : Tuple = (batch_size, self.unet.config.in_channels // 2, height, width)
lowercase__ : Any = next(self.unet.parameters() ).dtype
lowercase__ : Optional[int] = randn_tensor(lowercase_ , generator=lowercase_ , device=self.device , dtype=lowercase_ )
lowercase__ : Tuple = image.to(device=self.device , dtype=lowercase_ )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(lowercase_ , device=self.device )
lowercase__ : Optional[Any] = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
lowercase__ : List[str] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowercase__ : Optional[int] = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowercase__ : List[str] = {}
if accepts_eta:
lowercase__ : int = eta
for t in self.progress_bar(lowercase_ ):
# concat latents and low resolution image in the channel dimension.
lowercase__ : Optional[int] = torch.cat([latents, image] , dim=1 )
lowercase__ : Optional[int] = self.scheduler.scale_model_input(lowercase_ , lowercase_ )
# predict the noise residual
lowercase__ : Tuple = self.unet(lowercase_ , lowercase_ ).sample
# compute the previous noisy sample x_t -> x_t-1
lowercase__ : Optional[int] = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
# decode the image latents with the VQVAE
lowercase__ : Union[str, Any] = self.vqvae.decode(lowercase_ ).sample
lowercase__ : str = torch.clamp(lowercase_ , -1.0 , 1.0 )
lowercase__ : Any = image / 2 + 0.5
lowercase__ : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowercase__ : Optional[Any] = self.numpy_to_pil(lowercase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase_ )
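The class above matches diffusers' latent-diffusion super-resolution pipeline, so an end-to-end call can be sketched as below; the checkpoint id is the public CompVis one and the file names are placeholders.

```python
import torch
from PIL import Image
from diffusers import LDMSuperResolutionPipeline

pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")

low_res = Image.open("input.png").convert("RGB")  # placeholder input file
upscaled = pipe(image=low_res, num_inference_steps=100, eta=1.0).images[0]
upscaled.save("output.png")
```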
| 333 |
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3
class FailedTestError(RuntimeError):
pass
def gen(shards: List[str]):
for shard in shards:
for i in range(NUM_ITEMS_PER_SHARD):
yield {"i": i, "shard": shard}
def main():
rank = int(os.environ["RANK"])
world_size = int(os.environ["WORLD_SIZE"])
parser = ArgumentParser()
parser.add_argument("--streaming", type=bool)
parser.add_argument("--local_rank", type=int)
parser.add_argument("--num_workers", type=int, default=0)
args = parser.parse_args()
streaming = args.streaming
num_workers = args.num_workers
gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
if not streaming:
ds = Dataset.from_list(list(ds))
ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)
full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
expected_local_size = full_size // world_size
expected_local_size += int(rank < (full_size % world_size))
local_size = sum(1 for _ in dataloader)
if local_size != expected_local_size:
raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")
if __name__ == "__main__":
main()
| 333 | 1 |
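The test above exercises `split_dataset_by_node`. Its sizing rule can be checked in isolation; with contiguous sharding the remainder goes to the lowest ranks, so here rank 0 receives one extra row:

```python
from datasets import Dataset
from datasets.distributed import split_dataset_by_node

ds = Dataset.from_dict({"i": list(range(10))})
shard = split_dataset_by_node(ds, rank=0, world_size=3)
print(len(shard))  # 4 here: 10 rows over 3 ranks splits as 4 + 3 + 3
```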
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
"""A vertex may take `color` only when no already-colored neighbour uses it."""
return not any(neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(neighbours))
def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
"""Recursively try to color vertex `index` and every vertex after it."""
# Base case: every vertex has been assigned a color
if index == len(graph):
return True
# Recursive Step
for i in range(max_colors):
if valid_coloring(graph[index], colored_vertices, i):
# Color current vertex
colored_vertices[index] = i
# Validate coloring
if util_color(graph, max_colors, colored_vertices, index + 1):
return True
# Backtrack
colored_vertices[index] = -1
return False
def color(graph: list[list[int]], max_colors: int) -> list[int]:
"""Return a valid coloring as a list of color indices, or [] if none exists."""
colored_vertices = [-1] * len(graph)
if util_color(graph, max_colors, colored_vertices, 0):
return colored_vertices
return []
| 310 |
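A usage sketch for the backtracking graph-coloring functions above, on a small adjacency-matrix graph:

```python
graph = [
    [0, 1, 1, 0],
    [1, 0, 1, 0],
    [1, 1, 0, 1],
    [0, 0, 1, 0],
]
print(color(graph, 3))  # [0, 1, 2, 0], a valid 3-coloring
print(color(graph, 2))  # []: vertices 0, 1, 2 form a triangle, so 2 colors are not enough
```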
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : int = logging.get_logger(__name__)
snake_case_ : str = {}
class LlamaConfig(PretrainedConfig):
model_type = "llama"
keys_to_ignore_at_inference = ["past_key_values"]
def __init__(self, vocab_size=32000, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=None, hidden_act="silu", max_position_embeddings=2048, initializer_range=0.02, rms_norm_eps=1e-6, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, pretraining_tp=1, tie_word_embeddings=False, rope_scaling=None, **kwargs):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.pretraining_tp = pretraining_tp
self.use_cache = use_cache
self.rope_scaling = rope_scaling
self._rope_scaling_validation()
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
def _rope_scaling_validation(self):
"""Validate the `rope_scaling` configuration."""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
raise ValueError("`rope_scaling` must be a dictionary with two fields, `type` and `factor`, " f"got {self.rope_scaling}")
rope_scaling_type = self.rope_scaling.get("type", None)
rope_scaling_factor = self.rope_scaling.get("factor", None)
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
| 51 | 0 |
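A construction sketch for the configuration class above; the sizes are deliberately tiny for a quick CPU test, and the `rope_scaling` dict shows the exact shape `_rope_scaling_validation` accepts:

```python
from transformers import LlamaConfig, LlamaForCausalLM

config = LlamaConfig(
    hidden_size=512,            # deliberately small, illustrative only
    intermediate_size=1376,
    num_hidden_layers=4,
    num_attention_heads=8,
    num_key_value_heads=4,      # grouped-query attention
    rope_scaling={"type": "linear", "factor": 2.0},
)
model = LlamaForCausalLM(config)  # randomly initialised
```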
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 369 |
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
df = pd.read_csv("sample_data.csv", header=None)
len_data = df.shape[:1][0]
# If you're using some other dataset input the target column
actual_data = df.iloc[:, 1:2]
actual_data = actual_data.values.reshape(len_data, 1)
actual_data = MinMaxScaler().fit_transform(actual_data)
look_back = 10
forward_days = 5
periods = 20
division = len_data - periods * look_back
train_data = actual_data[:division]
test_data = actual_data[division - look_back :]
train_x, train_y = [], []
test_x, test_y = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
x_train = np.array(train_x)
x_test = np.array(test_x)
y_train = np.array([list(i.ravel()) for i in train_y])
y_test = np.array([list(i.ravel()) for i in test_y])
model = Sequential()
model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64, input_shape=(128, 1)))
model.add(Dense(forward_days))
model.compile(loss="mean_squared_error", optimizer="adam")
history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
pred = model.predict(x_test)
| 86 | 0 |
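The shapes produced by the sliding-window loops in the script above can be verified on synthetic data:

```python
import numpy as np

data = np.arange(100, dtype="float32").reshape(-1, 1)
look_back, forward_days = 10, 5
n = len(data) - forward_days - look_back + 1  # 86 windows
x = np.array([data[i : i + look_back] for i in range(n)])
y = np.array([data[i + look_back : i + look_back + forward_days].ravel() for i in range(n)])
print(x.shape, y.shape)  # (86, 10, 1) (86, 5)
```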
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
"feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
"processing_wav2vec2": ["Wav2Vec2Processor"],
"tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_wav2vec2"] = [
"WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Wav2Vec2ForAudioFrameClassification",
"Wav2Vec2ForCTC",
"Wav2Vec2ForMaskedLM",
"Wav2Vec2ForPreTraining",
"Wav2Vec2ForSequenceClassification",
"Wav2Vec2ForXVector",
"Wav2Vec2Model",
"Wav2Vec2PreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_wav2vec2"] = [
"TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFWav2Vec2ForCTC",
"TFWav2Vec2Model",
"TFWav2Vec2PreTrainedModel",
"TFWav2Vec2ForSequenceClassification",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_wav2vec2"] = [
"FlaxWav2Vec2ForCTC",
"FlaxWav2Vec2ForPreTraining",
"FlaxWav2Vec2Model",
"FlaxWav2Vec2PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 237 |
'''simple docstring'''
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
CHARACTER = "char"
BPE = "bpe"
WORDPIECE = "wp"
SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class MgpstrProcessor(ProcessorMixin):
attributes = ["image_processor", "char_tokenizer"]
image_processor_class = "ViTImageProcessor"
char_tokenizer_class = "MgpstrTokenizer"
def __init__( self :int , lowercase_ :int=None , lowercase_ :List[str]=None , **lowercase_ :List[Any] )-> Optional[Any]:
A__ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , lowercase_ , )
A__ = kwargs.pop("feature_extractor" )
A__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
A__ = tokenizer
A__ = AutoTokenizer.from_pretrained("gpt2" )
A__ = AutoTokenizer.from_pretrained("bert-base-uncased" )
super().__init__(lowercase_ , lowercase_ )
def __call__( self :Optional[Any] , lowercase_ :Any=None , lowercase_ :Tuple=None , lowercase_ :List[str]=None , **lowercase_ :Union[str, Any] )-> Optional[Any]:
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process." )
if images is not None:
A__ = self.image_processor(lowercase_ , return_tensors=lowercase_ , **lowercase_ )
if text is not None:
A__ = self.char_tokenizer(lowercase_ , return_tensors=lowercase_ , **lowercase_ )
if text is None:
return inputs
elif images is None:
return encodings
else:
A__ = encodings["input_ids"]
return inputs
def UpperCAmelCase_ ( self :List[str] , lowercase_ :int )-> int:
A__, A__, A__ = sequences
A__ = char_preds.size(0 )
A__, A__ = self._decode_helper(lowercase_ , "char" )
A__, A__ = self._decode_helper(lowercase_ , "bpe" )
A__, A__ = self._decode_helper(lowercase_ , "wp" )
A__ = []
A__ = []
for i in range(lowercase_ ):
A__ = [char_scores[i], bpe_scores[i], wp_scores[i]]
A__ = [char_strs[i], bpe_strs[i], wp_strs[i]]
A__ = scores.index(max(lowercase_ ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
A__ = {}
A__ = final_strs
A__ = final_scores
A__ = char_strs
A__ = bpe_strs
A__ = wp_strs
return out
def UpperCAmelCase_ ( self :Union[str, Any] , lowercase_ :List[str] , lowercase_ :str )-> Optional[Any]:
if format == DecodeType.CHARACTER:
A__ = self.char_decode
A__ = 1
A__ = "[s]"
elif format == DecodeType.BPE:
A__ = self.bpe_decode
A__ = 2
A__ = "#"
elif format == DecodeType.WORDPIECE:
A__ = self.wp_decode
A__ = 1_02
A__ = "[SEP]"
else:
raise ValueError(F"Format {format} is not supported." )
A__, A__ = [], []
A__ = pred_logits.size(0 )
A__ = pred_logits.size(1 )
A__, A__ = pred_logits.topk(1 , dim=-1 , largest=lowercase_ , sorted=lowercase_ )
A__ = preds_index.view(-1 , lowercase_ )[:, 1:]
A__ = decoder(lowercase_ )
A__, A__ = torch.nn.functional.softmax(lowercase_ , dim=2 ).max(dim=2 )
A__ = preds_max_prob[:, 1:]
for index in range(lowercase_ ):
A__ = preds_str[index].find(lowercase_ )
A__ = preds_str[index][:pred_eos]
A__ = preds_index[index].cpu().tolist()
A__ = pred_index.index(lowercase_ ) if eos_token in pred_index else -1
A__ = preds_max_prob[index][: pred_eos_index + 1]
A__ = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(lowercase_ )
conf_scores.append(lowercase_ )
return dec_strs, conf_scores
def UpperCAmelCase_ ( self :Dict , lowercase_ :Optional[Any] )-> int:
A__ = [seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(lowercase_ )]
return decode_strs
def UpperCAmelCase_ ( self :List[Any] , lowercase_ :Optional[Any] )-> List[str]:
return self.bpe_tokenizer.batch_decode(lowercase_ )
def UpperCAmelCase_ ( self :Optional[int] , lowercase_ :List[str] )-> Union[str, Any]:
A__ = [seq.replace(" " , "" ) for seq in self.wp_tokenizer.batch_decode(lowercase_ )]
return decode_strs
| 237 | 1 |
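A usage sketch for the processor above together with the MGP-STR recognition model, following the model's documented pattern; the image path is a placeholder. `batch_decode` picks, per sample, the best of the char/BPE/wordpiece hypotheses as implemented above.

```python
from PIL import Image
from transformers import MgpstrForSceneTextRecognition, MgpstrProcessor

processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")

pixel_values = processor(images=Image.open("word.png").convert("RGB"), return_tensors="pt").pixel_values
outputs = model(pixel_values)
text = processor.batch_decode(outputs.logits)["generated_text"]
print(text)
```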
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
_UpperCAmelCase = False
class snake_case_ ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : List[str] )->Tuple:
'''simple docstring'''
pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
image = load_image("https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg")
generator = torch.manual_seed(0)
image = pipe(image=image, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy").images
image_slice = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 365 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase = '▁'
_UpperCAmelCase = {'vocab_file': 'spiece.model'}
_UpperCAmelCase = {
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'}
}
_UpperCAmelCase = {
'google/pegasus-xsum': 512,
}
_UpperCAmelCase = logging.get_logger(__name__)
class PegasusTokenizer(PreTrainedTokenizer):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ['input_ids', 'attention_mask']
def __init__( self : List[Any] , _snake_case : Union[str, Any] , _snake_case : Union[str, Any]="<pad>" , _snake_case : int="</s>" , _snake_case : Any="<unk>" , _snake_case : Union[str, Any]="<mask_2>" , _snake_case : Any="<mask_1>" , _snake_case : Optional[int]=None , _snake_case : List[str]=103 , _snake_case : Optional[Dict[str, Any]] = None , **_snake_case : Optional[int] , )->None:
'''simple docstring'''
__lowerCAmelCase : Union[str, Any] = offset
if additional_special_tokens is not None:
if not isinstance(_snake_case , _snake_case ):
raise TypeError(
F'''additional_special_tokens should be of type {type(_snake_case )}, but is'''
F''' {type(_snake_case )}''' )
__lowerCAmelCase : List[str] = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F'''<unk_{i}>''' for i in range(len(_snake_case ) , self.offset - 1 )
]
if len(set(_snake_case ) ) != len(_snake_case ):
raise ValueError(
"""Please make sure that the provided additional_special_tokens do not contain an incorrectly"""
F''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
__lowerCAmelCase : Dict = additional_special_tokens_extended
else:
__lowerCAmelCase : Tuple = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F'''<unk_{i}>''' for i in range(2 , self.offset )]
__lowerCAmelCase : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=_snake_case , unk_token=_snake_case , mask_token=_snake_case , pad_token=_snake_case , mask_token_sent=_snake_case , offset=_snake_case , additional_special_tokens=_snake_case , sp_model_kwargs=self.sp_model_kwargs , **_snake_case , )
__lowerCAmelCase : Optional[Any] = mask_token_sent
__lowerCAmelCase : Any = vocab_file
__lowerCAmelCase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_snake_case )
# add special tokens to encoder dict
__lowerCAmelCase : Dict[int, str] = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
__lowerCAmelCase : Dict[str, int] = {v: k for k, v in self.encoder.items()}
@property
def UpperCAmelCase__ ( self : str )->int:
'''simple docstring'''
return len(self.sp_model ) + self.offset
def UpperCAmelCase__ ( self : Dict )->Dict[str, int]:
'''simple docstring'''
__lowerCAmelCase : Tuple = {self.convert_ids_to_tokens(_snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[int] )->str:
'''simple docstring'''
__lowerCAmelCase : Optional[Any] = self.__dict__.copy()
__lowerCAmelCase : Union[str, Any] = None
return state
def __setstate__( self : Any , _snake_case : str )->Any:
'''simple docstring'''
__lowerCAmelCase : str = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__lowerCAmelCase : Any = {}
__lowerCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase__ ( self : Dict , _snake_case : str )->List[str]:
'''simple docstring'''
return self.sp_model.encode(_snake_case , out_type=_snake_case )
def UpperCAmelCase__ ( self : Tuple , _snake_case : str )->int:
'''simple docstring'''
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
__lowerCAmelCase : Any = self.sp_model.piece_to_id(_snake_case )
return sp_id + self.offset
def UpperCAmelCase__ ( self : List[Any] , _snake_case : int )->str:
'''simple docstring'''
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
__lowerCAmelCase : Optional[int] = self.sp_model.IdToPiece(index - self.offset )
return token
def UpperCAmelCase__ ( self : Union[str, Any] , _snake_case : Optional[int] )->List[str]:
'''simple docstring'''
__lowerCAmelCase : Any = []
__lowerCAmelCase : Dict = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_snake_case ) + token
__lowerCAmelCase : int = []
else:
current_sub_tokens.append(_snake_case )
out_string += self.sp_model.decode(_snake_case )
return out_string.strip()
def UpperCAmelCase__ ( self : Optional[int] , _snake_case : Dict=False )->int:
'''simple docstring'''
return 1
def UpperCAmelCase__ ( self : Tuple , _snake_case : Tuple )->str:
'''simple docstring'''
__lowerCAmelCase : List[Any] = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def UpperCAmelCase__ ( self : List[str] , _snake_case : List , _snake_case : Optional[List] = None , _snake_case : bool = False )->List[int]:
'''simple docstring'''
if already_has_special_tokens:
return self._special_token_mask(_snake_case )
elif token_ids_a is None:
return self._special_token_mask(_snake_case ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def UpperCAmelCase__ ( self : Any , _snake_case : Union[str, Any] , _snake_case : Tuple=None )->List[int]:
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def UpperCAmelCase__ ( self : Any , _snake_case : str , _snake_case : Optional[str] = None )->Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_snake_case ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
__lowerCAmelCase : Optional[int] = os.path.join(
_snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(_snake_case , """wb""" ) as fi:
__lowerCAmelCase : Tuple = self.sp_model.serialized_model_proto()
fi.write(_snake_case )
return (out_vocab_file,)
| 232 | 0 |
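A short usage sketch for the PEGASUS tokenizer above; note that `build_inputs_with_special_tokens` appends only `</s>`, with no BOS token:

```python
from transformers import PegasusTokenizer

tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
ids = tokenizer("PEGASUS pre-trains with gap sentences.").input_ids
assert ids[-1] == tokenizer.eos_token_id
assert tokenizer.pad_token_id == 0 and tokenizer.eos_token_id == 1  # matches the encoder dict above
```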
__lowerCamelCase : List[str] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
__lowerCamelCase : Dict = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
__lowerCamelCase : int = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 52 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
root_marker = ""
protocol: str = None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
compression: str = None  # compression type in fsspec. ex: "gzip"
extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
def __init__(self , __magic_name__ = "" , __magic_name__ = None , __magic_name__ = None , **__magic_name__ ) -> Any:
'''simple docstring'''
super().__init__(self , **__magic_name__ )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
snake_case_ : Union[str, Any] = fsspec.open(
__magic_name__ , mode='''rb''' , protocol=__magic_name__ , compression=self.compression , client_kwargs={
'''requote_redirect_url''': False, # see https://github.com/huggingface/datasets/pull/5459
'''trust_env''': True, # Enable reading proxy env variables.
**(target_options or {}).pop('''client_kwargs''' , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
snake_case_ : Tuple = os.path.basename(self.file.path.split('''::''' )[0] )
snake_case_ : Optional[Any] = (
self.compressed_name[: self.compressed_name.rindex('''.''' )]
if '''.''' in self.compressed_name
else self.compressed_name
)
snake_case_ : Dict = None
@classmethod
def lowerCamelCase (cls , __magic_name__ ) -> Optional[int]:
'''simple docstring'''
return super()._strip_protocol(__magic_name__ ).lstrip('''/''' )
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
if self.dir_cache is None:
snake_case_ : Optional[int] = {**self.file.fs.info(self.file.path ), '''name''': self.uncompressed_name}
snake_case_ : List[str] = {f['''name''']: f}
def lowerCamelCase (self , __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
return self.file.open().read()
def lowerCamelCase (self , __magic_name__ , __magic_name__ = "rb" , __magic_name__=None , __magic_name__=True , __magic_name__=None , **__magic_name__ , ) -> int:
'''simple docstring'''
snake_case_ : Union[str, Any] = self._strip_protocol(__magic_name__ )
if mode != "rb":
raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
return self.file.open()
class Bz2FileSystem(BaseCompressedFileFileSystem):
protocol = "bz2"
compression = "bz2"
extension = ".bz2"
class GzipFileSystem(BaseCompressedFileFileSystem):
protocol = "gzip"
compression = "gzip"
extension = ".gz"
class Lz4FileSystem(BaseCompressedFileFileSystem):
protocol = "lz4"
compression = "lz4"
extension = ".lz4"
class XzFileSystem(BaseCompressedFileFileSystem):
protocol = "xz"
compression = "xz"
extension = ".xz"
class ZstdFileSystem(BaseCompressedFileFileSystem):
protocol = "zstd"
compression = "zstd"
extension = ".zst"
def __init__(self , __magic_name__ , __magic_name__ = "rb" , __magic_name__ = None , __magic_name__ = None , __magic_name__ = DEFAULT_BLOCK_SIZE , **__magic_name__ , ) -> Tuple:
'''simple docstring'''
super().__init__(
fo=__magic_name__ , mode=__magic_name__ , target_protocol=__magic_name__ , target_options=__magic_name__ , block_size=__magic_name__ , **__magic_name__ , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
snake_case_ : Dict = self.file.__enter__
class __lowerCAmelCase :
def __init__(self , __magic_name__ ) -> List[Any]:
'''simple docstring'''
snake_case_ : str = file_
def __enter__(self ) -> List[Any]:
'''simple docstring'''
self._file.__enter__()
return self
def __exit__(self , *__magic_name__ , **__magic_name__ ) -> int:
'''simple docstring'''
self._file.__exit__(*__magic_name__ , **__magic_name__ )
def __iter__(self ) -> Optional[int]:
'''simple docstring'''
return iter(self._file )
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
return next(self._file )
def __getattr__(self , __magic_name__ ) -> str:
'''simple docstring'''
return getattr(self._file , __magic_name__ )
def fixed_enter(*__magic_name__ , **__magic_name__ ):
return WrappedFile(_enter(*__magic_name__ , **__magic_name__ ) )
snake_case_ : Tuple = fixed_enter
| 279 | 0 |
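The classes above expose compressed files as single-file filesystems via fsspec. The same decompress-on-read behaviour is available directly through fsspec's `compression` argument, which is a quick way to check expectations:

```python
import gzip

import fsspec

# Write a small gzip file, then read it back through fsspec's compression layer.
with gzip.open("file.txt.gz", "wt") as f:
    f.write("hello\nworld\n")

with fsspec.open("file.txt.gz", mode="rt", compression="gzip") as f:
    print(f.read())
```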
"""simple docstring"""
from __future__ import annotations
def print_distance(distance: list[float], src: int) -> None:
    '''simple docstring'''
    print(f"""Vertex\tShortest Distance from vertex {src}""")
    for i, d in enumerate(distance):
        print(f"""{i}\t\t{d}""")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    '''simple docstring'''
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    '''simple docstring'''
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, source)
| 350 |
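# --- Editor's usage sketch (not part of the dataset rows above) ---
# Exercising bellman_ford() above on a tiny hard-coded graph instead of
# interactive input; expected shortest distances from vertex 0: [0.0, 3.0, 1.0].
example_graph = [
    {"src": 0, "dst": 1, "weight": 4},
    {"src": 0, "dst": 2, "weight": 1},
    {"src": 2, "dst": 1, "weight": 2},
]
print(bellman_ford(example_graph, vertex_count=3, edge_count=3, src=0))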
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
"""simple docstring"""
def __init__( self : Optional[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Tuple=1_3 , UpperCAmelCase__ : Optional[int]=3_0 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : List[str]=3 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : int=3_2 , UpperCAmelCase__ : Any=5 , UpperCAmelCase__ : Union[str, Any]=4 , UpperCAmelCase__ : List[str]=3_7 , UpperCAmelCase__ : Tuple="gelu" , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : str=0.1 , UpperCAmelCase__ : Any=1_0 , UpperCAmelCase__ : str=0.02 , ) -> Tuple:
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = image_size
__SCREAMING_SNAKE_CASE = patch_size
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = type_sequence_label_size
__SCREAMING_SNAKE_CASE = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__SCREAMING_SNAKE_CASE = (image_size // patch_size) ** 2
__SCREAMING_SNAKE_CASE = num_patches + 1
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , )
return config, pixel_values
def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] ) -> Any:
__SCREAMING_SNAKE_CASE = FlaxViTModel(config=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
__SCREAMING_SNAKE_CASE = (self.image_size, self.image_size)
__SCREAMING_SNAKE_CASE = (self.patch_size, self.patch_size)
__SCREAMING_SNAKE_CASE = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple ) -> Tuple:
__SCREAMING_SNAKE_CASE = self.type_sequence_label_size
__SCREAMING_SNAKE_CASE = FlaxViTForImageClassification(config=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = FlaxViTForImageClassification(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def UpperCAmelCase_ ( self : int ) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=3_7)
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Dict ) -> Dict:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Tuple ) -> Any:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Any ) -> List[Any]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__SCREAMING_SNAKE_CASE = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = model_class(UpperCAmelCase__ )
@jax.jit
def model_jitted(UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : Any ):
return model(pixel_values=UpperCAmelCase__ , **UpperCAmelCase__ )
with self.subTest("JIT Enabled" ):
__SCREAMING_SNAKE_CASE = model_jitted(**UpperCAmelCase__ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
__SCREAMING_SNAKE_CASE = model_jitted(**UpperCAmelCase__ ).to_tuple()
self.assertEqual(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) )
for jitted_output, output in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
for model_class_name in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class_name.from_pretrained("google/vit-base-patch16-224" )
__SCREAMING_SNAKE_CASE = model(np.ones((1, 3, 2_2_4, 2_2_4) ) )
self.assertIsNotNone(UpperCAmelCase__ )
| 195 | 0 |
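# --- Editor's usage sketch (not part of the dataset rows above) ---
# A plain forward pass through a randomly initialised FlaxViTModel, mirroring
# what the tester above checks; the tiny config values are arbitrary.
import numpy as np
from transformers import ViTConfig
from transformers.models.vit.modeling_flax_vit import FlaxViTModel

config = ViTConfig(image_size=32, patch_size=4, hidden_size=32, num_hidden_layers=2, num_attention_heads=2, intermediate_size=64)
model = FlaxViTModel(config)
outputs = model(np.ones((1, 3, 32, 32)))
print(outputs.last_hidden_state.shape)  # (1, (32 // 4) ** 2 + 1, 32) == (1, 65, 32)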
'''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    """simple docstring"""

    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    """simple docstring"""

    XL_PREFIX = '''\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '''
def __init__( self : List[Any] ,*_a : int ,**_a : str ):
'''simple docstring'''
super().__init__(*a_ ,**a_ )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
_a : str = None
if self.model.config.prefix is not None:
_a : Optional[Any] = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
_a : Any = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
            preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
            self._preprocess_params = {**self._preprocess_params, **preprocess_params}
            self._forward_params = {**self._forward_params, **forward_params}
def __lowercase ( self : Any ,_a : Optional[int]=None ,_a : str=None ,_a : Optional[int]=None ,_a : str=None ,_a : Dict=None ,_a : Tuple=None ,_a : Tuple=None ,_a : List[Any]=None ,**_a : int ,):
'''simple docstring'''
_a : str = {}
if prefix is not None:
_a : List[Any] = prefix
if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework)
            generate_kwargs["prefix_length"] = prefix_inputs["""input_ids"""].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"""
' [None, \'hole\']' )
_a : List[Any] = handle_long_generation
preprocess_params.update(a_ )
_a : Optional[Any] = generate_kwargs
_a : Tuple = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_full_text`' )
if return_tensors is not None:
raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' )
_a : int = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_tensors`' )
_a : Union[str, Any] = ReturnType.TENSORS
if return_type is not None:
_a : List[str] = return_type
if clean_up_tokenization_spaces is not None:
_a : str = clean_up_tokenization_spaces
if stop_sequence is not None:
_a : Dict = self.tokenizer.encode(a_ ,add_special_tokens=a_ )
if len(a_ ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
_a : Optional[Any] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __lowercase ( self : Dict ,*_a : str ,**_a : Union[str, Any] ):
'''simple docstring'''
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'add_space_before_punct_symbol': True} )
return super()._parse_and_tokenize(*a_ ,**a_ )
def __call__( self : List[Any] ,_a : Dict ,**_a : List[str] ):
'''simple docstring'''
return super().__call__(a_ ,**a_ )
def __lowercase ( self : Any ,_a : int ,_a : List[Any]="" ,_a : Tuple=None ,**_a : Tuple ):
'''simple docstring'''
_a : List[Any] = self.tokenizer(
prefix + prompt_text ,padding=a_ ,add_special_tokens=a_ ,return_tensors=self.framework )
_a : int = prompt_text
if handle_long_generation == "hole":
_a : Union[str, Any] = inputs["""input_ids"""].shape[-1]
if "max_new_tokens" in generate_kwargs:
_a : Union[str, Any] = generate_kwargs["""max_new_tokens"""]
else:
_a : Union[str, Any] = generate_kwargs.get('max_length' ,self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('We cannot infer how many new tokens are expected' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
_a : List[Any] = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'We cannot use `hole` to handle this generation the number of desired tokens exceeds the'
' models max length' )
_a : int = inputs["""input_ids"""][:, -keep_length:]
if "attention_mask" in inputs:
_a : List[str] = inputs["""attention_mask"""][:, -keep_length:]
return inputs
def __lowercase ( self : int ,_a : Any ,**_a : List[str] ):
'''simple docstring'''
_a : int = model_inputs["""input_ids"""]
_a : str = model_inputs.get('attention_mask' ,a_ )
# Allow empty prompts
if input_ids.shape[1] == 0:
_a : Any = None
_a : int = None
_a : Union[str, Any] = 1
else:
_a : List[Any] = input_ids.shape[0]
_a : Optional[int] = model_inputs.pop('prompt_text' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
_a : Any = generate_kwargs.pop('prefix_length' ,0 )
if prefix_length > 0:
_a : str = """max_new_tokens""" in generate_kwargs or (
"""generation_config""" in generate_kwargs
and generate_kwargs["""generation_config"""].max_new_tokens is not None
)
if not has_max_new_tokens:
_a : Dict = generate_kwargs.get('max_length' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
_a : Any = """min_new_tokens""" in generate_kwargs or (
"""generation_config""" in generate_kwargs
and generate_kwargs["""generation_config"""].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
_a : Dict = self.model.generate(input_ids=a_ ,attention_mask=a_ ,**a_ )
_a : Union[str, Any] = generated_sequence.shape[0]
if self.framework == "pt":
_a : List[Any] = generated_sequence.reshape(a_ ,out_b // in_b ,*generated_sequence.shape[1:] )
elif self.framework == "tf":
_a : Optional[int] = tf.reshape(a_ ,(in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def __lowercase ( self : Optional[Any] ,_a : Dict ,_a : Any=ReturnType.FULL_TEXT ,_a : Union[str, Any]=True ):
'''simple docstring'''
_a : Optional[int] = model_outputs["""generated_sequence"""][0]
_a : Any = model_outputs["""input_ids"""]
_a : int = model_outputs["""prompt_text"""]
_a : Optional[Any] = generated_sequence.numpy().tolist()
_a : str = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
_a : Union[str, Any] = {"""generated_token_ids""": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
_a : List[str] = self.tokenizer.decode(
a_ ,skip_special_tokens=a_ ,clean_up_tokenization_spaces=a_ ,)
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
_a : str = 0
else:
_a : Optional[Any] = len(
self.tokenizer.decode(
input_ids[0] ,skip_special_tokens=a_ ,clean_up_tokenization_spaces=a_ ,) )
if return_type == ReturnType.FULL_TEXT:
_a : Optional[int] = prompt_text + text[prompt_length:]
else:
_a : int = text[prompt_length:]
_a : List[Any] = {"""generated_text""": all_text}
records.append(a_ )
return records
| 271 |
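# --- Editor's usage sketch (not part of the dataset rows above) ---
# The class above is transformers' text-generation pipeline; end users normally
# reach it through the pipeline() factory. "gpt2" is just an example checkpoint.
from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")
print(generator("Hello, I'm a language model,", max_new_tokens=20, return_full_text=False))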
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
'''simple docstring'''
lowercase__ = CodeGenTokenizer
lowercase__ = CodeGenTokenizerFast
lowercase__ = True
lowercase__ = {"add_prefix_space": True}
lowercase__ = False
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_snake_case : Tuple = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
_snake_case : Tuple = dict(zip(a_, range(len(a_ ) ) ) )
_snake_case : str = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_snake_case : List[Any] = {"""unk_token""": """<unk>"""}
_snake_case : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""] )
_snake_case : Optional[Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file, """w""", encoding="""utf-8""" ) as fp:
fp.write(json.dumps(a_ ) + """\n""" )
with open(self.merges_file, """w""", encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(a_ ) )
def UpperCamelCase_ ( self: Any, **a_: int ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname, **a_ )
def UpperCamelCase_ ( self: Any, **a_: str ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **a_ )
def UpperCamelCase_ ( self: Union[str, Any], a_: Dict ):
'''simple docstring'''
_snake_case : Union[str, Any] = """lower newer"""
_snake_case : Tuple = """lower newer"""
return input_text, output_text
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Union[str, Any] = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map )
_snake_case : Optional[Any] = """lower newer"""
_snake_case : Optional[int] = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
_snake_case : int = tokenizer.tokenize(a_, add_prefix_space=a_ )
self.assertListEqual(a_, a_ )
_snake_case : str = tokens + [tokenizer.unk_token]
_snake_case : Optional[int] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ), a_ )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
_snake_case : int = self.get_tokenizer()
_snake_case : int = self.get_rust_tokenizer(add_prefix_space=a_ )
_snake_case : Dict = """lower newer"""
# Testing tokenization
_snake_case : Dict = tokenizer.tokenize(a_, add_prefix_space=a_ )
_snake_case : List[str] = rust_tokenizer.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Testing conversion to ids without special tokens
_snake_case : Optional[Any] = tokenizer.encode(a_, add_special_tokens=a_, add_prefix_space=a_ )
_snake_case : Tuple = rust_tokenizer.encode(a_, add_special_tokens=a_ )
self.assertListEqual(a_, a_ )
# Testing conversion to ids with special tokens
_snake_case : Tuple = self.get_rust_tokenizer(add_prefix_space=a_ )
_snake_case : int = tokenizer.encode(a_, add_prefix_space=a_ )
_snake_case : Optional[Any] = rust_tokenizer.encode(a_ )
self.assertListEqual(a_, a_ )
# Testing the unknown token
_snake_case : Tuple = tokens + [rust_tokenizer.unk_token]
_snake_case : List[Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(a_ ), a_ )
def UpperCamelCase_ ( self: Dict, *a_: Dict, **a_: int ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: int, a_: List[Any]=15 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_snake_case : List[Any] = self.rust_tokenizer_class.from_pretrained(a_, **a_ )
# Simple input
_snake_case : Any = """This is a simple input"""
_snake_case : Optional[int] = ["""This is a simple input 1""", """This is a simple input 2"""]
_snake_case : Optional[int] = ("""This is a simple input""", """This is a pair""")
_snake_case : Optional[Any] = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(a_, tokenizer_r.encode, a_, max_length=a_, padding="""max_length""" )
# Simple input
self.assertRaises(a_, tokenizer_r.encode_plus, a_, max_length=a_, padding="""max_length""" )
# Simple input
self.assertRaises(
a_, tokenizer_r.batch_encode_plus, a_, max_length=a_, padding="""max_length""", )
# Pair input
self.assertRaises(a_, tokenizer_r.encode, a_, max_length=a_, padding="""max_length""" )
# Pair input
self.assertRaises(a_, tokenizer_r.encode_plus, a_, max_length=a_, padding="""max_length""" )
# Pair input
self.assertRaises(
a_, tokenizer_r.batch_encode_plus, a_, max_length=a_, padding="""max_length""", )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : List[str] = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="""<pad>""" )
# Simple input
_snake_case : List[Any] = """This is a simple input"""
_snake_case : int = ["""This is a simple input looooooooong""", """This is a simple input"""]
_snake_case : Any = ("""This is a simple input""", """This is a pair""")
_snake_case : str = [
("""This is a simple input loooooong""", """This is a simple input"""),
("""This is a simple pair loooooong""", """This is a simple pair"""),
]
_snake_case : str = tokenizer.pad_token_id
_snake_case : Optional[int] = tokenizer(a_, padding="""max_length""", max_length=30, return_tensors="""np""" )
_snake_case : Dict = tokenizer(a_, padding=a_, truncate=a_, return_tensors="""np""" )
_snake_case : Tuple = tokenizer(*a_, padding="""max_length""", max_length=60, return_tensors="""np""" )
_snake_case : Optional[Any] = tokenizer(a_, padding=a_, truncate=a_, return_tensors="""np""" )
# s
# test single string max_length padding
self.assertEqual(out_s["""input_ids"""].shape[-1], 30 )
self.assertTrue(pad_token_id in out_s["""input_ids"""] )
self.assertTrue(0 in out_s["""attention_mask"""] )
# s2
# test automatic padding
self.assertEqual(out_sa["""input_ids"""].shape[-1], 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
self.assertFalse(0 in out_sa["""attention_mask"""][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
self.assertTrue(0 in out_sa["""attention_mask"""][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["""input_ids"""].shape[-1], 60 )
self.assertTrue(pad_token_id in out_p["""input_ids"""] )
self.assertTrue(0 in out_p["""attention_mask"""] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["""input_ids"""].shape[-1], 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
self.assertFalse(0 in out_pa["""attention_mask"""][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
self.assertTrue(0 in out_pa["""attention_mask"""][1] )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Tuple = """$$$"""
_snake_case : List[Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=a_, add_bos_token=a_ )
_snake_case : str = """This is a simple input"""
_snake_case : int = ["""This is a simple input 1""", """This is a simple input 2"""]
_snake_case : Union[str, Any] = tokenizer.bos_token_id
_snake_case : Tuple = tokenizer(a_ )
_snake_case : Optional[Any] = tokenizer(a_ )
self.assertEqual(out_s.input_ids[0], a_ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_snake_case : Optional[int] = tokenizer.decode(out_s.input_ids )
_snake_case : int = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0], a_ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case : Optional[int] = CodeGenTokenizer.from_pretrained("""Salesforce/codegen-350M-mono""" )
_snake_case : Dict = """\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"""
_snake_case : Union[str, Any] = """\nif len_a > len_b: result = a\nelse: result = b"""
_snake_case : Optional[Any] = tokenizer.encode(a_ )
_snake_case : Dict = ["""^#""", re.escape("""<|endoftext|>""" ), """^'''""", """^\"\"\"""", """\n\n\n"""]
_snake_case : Optional[Any] = tokenizer.decode(a_, truncate_before_pattern=a_ )
self.assertEqual(a_, a_ )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
pass
| 64 | 0 |
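# --- Editor's usage sketch (not part of the dataset rows above) ---
# The decode-time truncation exercised by the slow test above:
# truncate_before_pattern cuts a completion at the first multiline regex match.
from transformers import CodeGenTokenizer

tok = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
ids = tok.encode("def add(a, b):\n    return a + b\n\n# trailing comment")
print(tok.decode(ids, truncate_before_pattern=["^#", "\n\n\n"]))  # the trailing comment is dropped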
'''simple docstring'''
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr: Sequence[float], low: int, high: int) -> tuple[int | None, int | None, float]:
    """simple docstring"""
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]

    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    """simple docstring"""
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1

    summ: float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i

    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i

    return max_left, max_right, (left_sum + right_sum)


def time_max_subarray(input_size: int) -> float:
    """simple docstring"""
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


def plot_runtimes() -> None:
    """simple docstring"""
    input_sizes = [10, 100, 1000, 10_000, 50_000, 100_000, 200_000, 300_000, 400_000, 500_000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 6 |
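# --- Editor's usage sketch (not part of the dataset rows above) ---
# max_subarray() above on the classic example: the maximum subarray is
# [4, -1, 2, 1] spanning indices 3..6 with sum 6.
arr = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(max_subarray(arr, 0, len(arr) - 1))  # (3, 6, 6)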
'''simple docstring'''
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]


def next_number(number: int) -> int:
    """simple docstring"""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000

    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10_000_000
CHAINS[0] = True  # the chain of 1 ends in 1
CHAINS[57] = False  # the chain of 58 ends in 89


def chain(number: int) -> bool:
    """simple docstring"""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10_000_000) -> int:
    """simple docstring"""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution() = }""")
| 6 | 1 |
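# --- Editor's usage sketch (not part of the dataset rows above) ---
# One square-digit-chain step computed by next_number() above:
# 44 -> 4**2 + 4**2 = 32, and the full chain 44 -> 32 -> 13 -> 10 -> 1
# terminates at 1, so chain(44) is True (only chains ending at 89 are False).
print(next_number(44))  # 32
print(chain(44))        # True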
"""simple docstring"""
import requests
__UpperCamelCase = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="
def lowercase (SCREAMING_SNAKE_CASE_ : str ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page['articles'] , 1 ):
print(F'{i}.) {article["title"]}' )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key='''<Your BBC News API key goes here>''')
| 113 | """simple docstring"""
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    '''simple docstring'''
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"""Embedding dimension {embedding_dim} should be even"""

    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    time_embed_dim: int = 3_2
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name='linear_1')(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name='linear_2')(temb)
        return temb


class FlaxTimesteps(nn.Module):
    dim: int = 3_2
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift)
| 77 | 0 |
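# --- Editor's usage sketch (not part of the dataset rows above) ---
# Shape check for get_sinusoidal_embeddings() above: four timesteps map to
# four 8-dim embeddings (4 sin components followed by 4 cos components).
import jax.numpy as jnp

timesteps = jnp.arange(4)
print(get_sinusoidal_embeddings(timesteps, embedding_dim=8).shape)  # (4, 8)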
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    """simple docstring"""

    model_input_names = ["pixel_values"]
def __init__( self : Optional[Any], _UpperCAmelCase : bool = True, _UpperCAmelCase : Dict[str, int] = None, _UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC, _UpperCAmelCase : bool = True, _UpperCAmelCase : Dict[str, int] = None, _UpperCAmelCase : bool = True, _UpperCAmelCase : Union[int, float] = 1 / 2_5_5, _UpperCAmelCase : bool = True, _UpperCAmelCase : Optional[Union[float, List[float]]] = None, _UpperCAmelCase : Optional[Union[float, List[float]]] = None, _UpperCAmelCase : bool = True, **_UpperCAmelCase : Tuple, ) -> None:
"""simple docstring"""
super().__init__(**_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = size if size is not None else {"shortest_edge": 2_2_4}
SCREAMING_SNAKE_CASE__ : Optional[Any] = get_size_dict(_UpperCAmelCase, default_to_square=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Tuple = crop_size if crop_size is not None else {"height": 2_2_4, "width": 2_2_4}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_size_dict(_UpperCAmelCase, default_to_square=_UpperCAmelCase, param_name="crop_size" )
SCREAMING_SNAKE_CASE__ : List[str] = do_resize
SCREAMING_SNAKE_CASE__ : List[Any] = size
SCREAMING_SNAKE_CASE__ : Optional[int] = resample
SCREAMING_SNAKE_CASE__ : Optional[Any] = do_center_crop
SCREAMING_SNAKE_CASE__ : int = crop_size
SCREAMING_SNAKE_CASE__ : str = do_rescale
SCREAMING_SNAKE_CASE__ : Optional[int] = rescale_factor
SCREAMING_SNAKE_CASE__ : Optional[int] = do_normalize
SCREAMING_SNAKE_CASE__ : int = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
SCREAMING_SNAKE_CASE__ : Any = image_std if image_std is not None else OPENAI_CLIP_STD
SCREAMING_SNAKE_CASE__ : str = do_convert_rgb
def A_ ( self : List[Any], _UpperCAmelCase : np.ndarray, _UpperCAmelCase : Dict[str, int], _UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC, _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None, **_UpperCAmelCase : List[Any], ) -> np.ndarray:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = get_size_dict(_UpperCAmelCase, default_to_square=_UpperCAmelCase )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
SCREAMING_SNAKE_CASE__ : Tuple = get_resize_output_image_size(_UpperCAmelCase, size=size["shortest_edge"], default_to_square=_UpperCAmelCase )
return resize(_UpperCAmelCase, size=_UpperCAmelCase, resample=_UpperCAmelCase, data_format=_UpperCAmelCase, **_UpperCAmelCase )
def A_ ( self : Optional[int], _UpperCAmelCase : np.ndarray, _UpperCAmelCase : Dict[str, int], _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None, **_UpperCAmelCase : Dict, ) -> np.ndarray:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = get_size_dict(_UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(_UpperCAmelCase, size=(size["height"], size["width"]), data_format=_UpperCAmelCase, **_UpperCAmelCase )
def A_ ( self : List[str], _UpperCAmelCase : np.ndarray, _UpperCAmelCase : Union[int, float], _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None, **_UpperCAmelCase : Optional[Any], ) -> Any:
"""simple docstring"""
return rescale(_UpperCAmelCase, scale=_UpperCAmelCase, data_format=_UpperCAmelCase, **_UpperCAmelCase )
def A_ ( self : str, _UpperCAmelCase : np.ndarray, _UpperCAmelCase : Union[float, List[float]], _UpperCAmelCase : Union[float, List[float]], _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None, **_UpperCAmelCase : str, ) -> np.ndarray:
"""simple docstring"""
return normalize(_UpperCAmelCase, mean=_UpperCAmelCase, std=_UpperCAmelCase, data_format=_UpperCAmelCase, **_UpperCAmelCase )
def A_ ( self : List[Any], _UpperCAmelCase : ImageInput, _UpperCAmelCase : bool = None, _UpperCAmelCase : Dict[str, int] = None, _UpperCAmelCase : PILImageResampling = None, _UpperCAmelCase : bool = None, _UpperCAmelCase : int = None, _UpperCAmelCase : bool = None, _UpperCAmelCase : float = None, _UpperCAmelCase : bool = None, _UpperCAmelCase : Optional[Union[float, List[float]]] = None, _UpperCAmelCase : Optional[Union[float, List[float]]] = None, _UpperCAmelCase : bool = None, _UpperCAmelCase : Optional[Union[str, TensorType]] = None, _UpperCAmelCase : Optional[ChannelDimension] = ChannelDimension.FIRST, **_UpperCAmelCase : Dict, ) -> PIL.Image.Image:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ : List[Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ : int = get_size_dict(_UpperCAmelCase, param_name="size", default_to_square=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[int] = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE__ : str = get_size_dict(_UpperCAmelCase, param_name="crop_size", default_to_square=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ : Optional[int] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE__ : Optional[int] = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
SCREAMING_SNAKE_CASE__ : Dict = make_list_of_images(_UpperCAmelCase )
if not valid_images(_UpperCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
SCREAMING_SNAKE_CASE__ : Dict = [convert_to_rgb(_UpperCAmelCase ) for image in images]
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [to_numpy_array(_UpperCAmelCase ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE__ : Optional[int] = [self.resize(image=_UpperCAmelCase, size=_UpperCAmelCase, resample=_UpperCAmelCase ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE__ : Any = [self.center_crop(image=_UpperCAmelCase, size=_UpperCAmelCase ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__ : Optional[int] = [self.rescale(image=_UpperCAmelCase, scale=_UpperCAmelCase ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__ : List[str] = [self.normalize(image=_UpperCAmelCase, mean=_UpperCAmelCase, std=_UpperCAmelCase ) for image in images]
SCREAMING_SNAKE_CASE__ : Optional[int] = [to_channel_dimension_format(_UpperCAmelCase, _UpperCAmelCase ) for image in images]
SCREAMING_SNAKE_CASE__ : str = {"pixel_values": images}
return BatchFeature(data=_UpperCAmelCase, tensor_type=_UpperCAmelCase )
| 191 |
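# --- Editor's usage sketch (not part of the dataset rows above) ---
# Default CLIP preprocessing: resize so the shortest edge is 224, centre-crop
# to 224x224, rescale to [0, 1] and normalise with the OpenAI CLIP statistics.
import numpy as np
from transformers import CLIPImageProcessor

processor = CLIPImageProcessor()
image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)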
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    '''simple docstring'''
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, variable))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(prev_guess)
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
# Find root of polynomial
# Find fourth Root of 5
print(f"The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}")
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
f"{newton_raphson('log(y) - 1', 2, variable='y')}",
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
f"{newton_raphson('exp(x) - 1', 1_0, precision=0.005)}",
)
# Find root of cos(x)
print(f"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}")
| 191 | 1 |
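# --- Editor's usage sketch (not part of the dataset rows above) ---
# newton_raphson() above converging on the square root of 2 from a nearby
# real starting point.
print(newton_raphson("x**2 - 2", 1.5))  # ~1.4142135623730951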
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
UpperCAmelCase__ = True
except ImportError:
UpperCAmelCase__ = False
try:
from torch.hub import _get_torch_home
UpperCAmelCase__ = _get_torch_home()
except ImportError:
UpperCAmelCase__ = os.path.expanduser(
os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
)
UpperCAmelCase__ = os.path.join(torch_cache_home, "transformers")
UpperCAmelCase__ = "https://cdn.huggingface.co"
UpperCAmelCase__ = "https://s3.amazonaws.com/models.huggingface.co/bert"
UpperCAmelCase__ = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
UpperCAmelCase__ = os.path.join(PATH, "config.yaml")
UpperCAmelCase__ = os.path.join(PATH, "attributes.txt")
UpperCAmelCase__ = os.path.join(PATH, "objects.txt")
UpperCAmelCase__ = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
UpperCAmelCase__ = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
UpperCAmelCase__ = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
UpperCAmelCase__ = "pytorch_model.bin"
UpperCAmelCase__ = "config.yaml"
def A ( _UpperCAmelCase : Any=OBJECTS , _UpperCAmelCase : Any=ATTRIBUTES ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = []
with open(_UpperCAmelCase ) as f:
for object in f.readlines():
vg_classes.append(object.split(',' )[0].lower().strip() )
_UpperCAmelCase = []
with open(_UpperCAmelCase ) as f:
for object in f.readlines():
vg_attrs.append(object.split(',' )[0].lower().strip() )
return vg_classes, vg_attrs
def A ( _UpperCAmelCase : List[str] ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = OrderedDict()
with open(_UpperCAmelCase , 'rb' ) as f:
_UpperCAmelCase = pkl.load(_UpperCAmelCase )['model']
for k in copy.deepcopy(list(ckp.keys() ) ):
_UpperCAmelCase = ckp.pop(_UpperCAmelCase )
if isinstance(_UpperCAmelCase , np.ndarray ):
_UpperCAmelCase = torch.tensor(_UpperCAmelCase )
else:
assert isinstance(_UpperCAmelCase , torch.tensor ), type(_UpperCAmelCase )
_UpperCAmelCase = v
return r
class Config:
    _pointer = {}
def __init__( self : Union[str, Any] , A : dict , A : str = "root" , A : Union[str, Any]=0) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = name
_UpperCAmelCase = level
_UpperCAmelCase = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
_UpperCAmelCase = copy.deepcopy(A)
_UpperCAmelCase = copy.deepcopy(A)
if isinstance(A , A):
_UpperCAmelCase = Config(A , name=A , level=level + 1)
_UpperCAmelCase = v
setattr(self , A , A)
_UpperCAmelCase = d
def __repr__( self : int) -> Optional[int]:
"""simple docstring"""
return str(list((self._pointer.keys())))
def __setattr__( self : int , A : Dict , A : Union[str, Any]) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = val
_UpperCAmelCase = val
_UpperCAmelCase = key.split('.')
_UpperCAmelCase = len(A) - 1
_UpperCAmelCase = self._pointer
if len(A) > 1:
for i, l in enumerate(A):
if hasattr(self , A) and isinstance(getattr(self , A) , A):
setattr(getattr(self , A) , '.'.join(levels[i:]) , A)
if l == last_level:
_UpperCAmelCase = val
else:
_UpperCAmelCase = pointer[l]
def _lowerCamelCase ( self : Tuple) -> Tuple:
"""simple docstring"""
return self._pointer
def _lowerCamelCase ( self : Optional[Any] , A : Dict , A : List[str]) -> str:
"""simple docstring"""
with open(F"{file_name}" , 'w') as stream:
dump(A , A)
def _lowerCamelCase ( self : Union[str, Any] , A : Tuple , A : Dict) -> str:
"""simple docstring"""
with open(F"{file_name}" , 'w') as stream:
json.dump(A , A)
@staticmethod
def _lowerCamelCase ( A : Optional[Any]) -> int:
"""simple docstring"""
with open(A) as stream:
_UpperCAmelCase = load(A , Loader=A)
return data
def __str__( self : List[str]) -> str:
"""simple docstring"""
_UpperCAmelCase = ' '
if self._name != "root":
_UpperCAmelCase = F"{t * (self._level-1)}{self._name}:\n"
else:
_UpperCAmelCase = ''
_UpperCAmelCase = self._level
for i, (k, v) in enumerate(self._pointer.items()):
if isinstance(A , A):
r += F"{t * (self._level)}{v}\n"
self._level += 1
else:
r += F"{t * (self._level)}{k}: {v} ({type(A).__name__})\n"
_UpperCAmelCase = level
return r[:-1]
@classmethod
def _lowerCamelCase ( cls : Any , A : str , **A : List[str]) -> List[str]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = cls.get_config_dict(A , **A)
return cls(A)
@classmethod
def _lowerCamelCase ( cls : str , A : str , **A : Tuple) -> Dict:
"""simple docstring"""
_UpperCAmelCase = kwargs.pop('cache_dir' , A)
_UpperCAmelCase = kwargs.pop('force_download' , A)
_UpperCAmelCase = kwargs.pop('resume_download' , A)
_UpperCAmelCase = kwargs.pop('proxies' , A)
_UpperCAmelCase = kwargs.pop('local_files_only' , A)
if os.path.isdir(A):
_UpperCAmelCase = os.path.join(A , A)
elif os.path.isfile(A) or is_remote_url(A):
_UpperCAmelCase = pretrained_model_name_or_path
else:
_UpperCAmelCase = hf_bucket_url(A , filename=A , use_cdn=A)
try:
# Load from URL or cache if already cached
_UpperCAmelCase = cached_path(
A , cache_dir=A , force_download=A , proxies=A , resume_download=A , local_files_only=A , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
_UpperCAmelCase = Config.load_yaml(A)
except EnvironmentError:
_UpperCAmelCase = 'Can\'t load config for'
raise EnvironmentError(A)
if resolved_config_file == config_file:
print('loading configuration file from path')
else:
print('loading configuration file cache')
return Config.load_yaml(A), kwargs
def A ( _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = torch.load('dump.pt' , map_location=in_tensor.device )
_UpperCAmelCase = in_tensor.numpy()
_UpperCAmelCase = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(_UpperCAmelCase , _UpperCAmelCase , rtol=0.01 , atol=0.1 ), (
F"{sum([1 for x in np.isclose(_UpperCAmelCase , _UpperCAmelCase , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %"
" element-wise mismatch"
)
raise Exception('tensors are all good' )
# Hugging face functions below
def A ( _UpperCAmelCase : Tuple ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = urlparse(_UpperCAmelCase )
return parsed.scheme in ("http", "https")
def A ( _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any]=True ) -> str:
'''simple docstring'''
_UpperCAmelCase = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
_UpperCAmelCase = '/' not in model_id
if legacy_format:
return F"{endpoint}/{model_id}-{filename}"
else:
return F"{endpoint}/{model_id}/{filename}"
def A ( _UpperCAmelCase : Any , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : Union[str, Any]=0 , _UpperCAmelCase : Union[str, Any]=None , ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = 'python/{}'.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
ua += "; " + "; ".join('{}/{}'.format(_UpperCAmelCase , _UpperCAmelCase ) for k, v in user_agent.items() )
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
ua += "; " + user_agent
_UpperCAmelCase = {'user-agent': ua}
if resume_size > 0:
_UpperCAmelCase = 'bytes=%d-' % (resume_size,)
_UpperCAmelCase = requests.get(_UpperCAmelCase , stream=_UpperCAmelCase , proxies=_UpperCAmelCase , headers=_UpperCAmelCase )
if response.status_code == 416: # Range not satisfiable
return
_UpperCAmelCase = response.headers.get('Content-Length' )
_UpperCAmelCase = resume_size + int(_UpperCAmelCase ) if content_length is not None else None
_UpperCAmelCase = tqdm(
unit='B' , unit_scale=_UpperCAmelCase , total=_UpperCAmelCase , initial=_UpperCAmelCase , desc='Downloading' , )
for chunk in response.iter_content(chunk_size=1_024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(_UpperCAmelCase ) )
temp_file.write(_UpperCAmelCase )
progress.close()
def A ( _UpperCAmelCase : Any , _UpperCAmelCase : Dict=None , _UpperCAmelCase : Union[str, Any]=False , _UpperCAmelCase : Union[str, Any]=None , _UpperCAmelCase : Union[str, Any]=10 , _UpperCAmelCase : int=False , _UpperCAmelCase : int=None , _UpperCAmelCase : Optional[Any]=False , ) -> Union[str, Any]:
'''simple docstring'''
if cache_dir is None:
_UpperCAmelCase = TRANSFORMERS_CACHE
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_UpperCAmelCase = str(_UpperCAmelCase )
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
_UpperCAmelCase = None
if not local_files_only:
try:
_UpperCAmelCase = requests.head(_UpperCAmelCase , allow_redirects=_UpperCAmelCase , proxies=_UpperCAmelCase , timeout=_UpperCAmelCase )
if response.status_code == 200:
_UpperCAmelCase = response.headers.get('ETag' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
_UpperCAmelCase = url_to_filename(_UpperCAmelCase , _UpperCAmelCase )
# get cache path to put the file
_UpperCAmelCase = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(_UpperCAmelCase ):
return cache_path
else:
_UpperCAmelCase = [
file
for file in fnmatch.filter(os.listdir(_UpperCAmelCase ) , filename + '.*' )
if not file.endswith('.json' ) and not file.endswith('.lock' )
]
if len(_UpperCAmelCase ) > 0:
return os.path.join(_UpperCAmelCase , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'Cannot find the requested files in the cached path and outgoing traffic has been'
' disabled. To enable model look-ups and downloads online, set \'local_files_only\''
' to False.' )
return None
# From now on, etag is not None.
if os.path.exists(_UpperCAmelCase ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
_UpperCAmelCase = cache_path + '.lock'
with FileLock(_UpperCAmelCase ):
# If the download just completed while the lock was activated.
if os.path.exists(_UpperCAmelCase ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
_UpperCAmelCase = cache_path + '.incomplete'
@contextmanager
def _resumable_file_manager():
with open(_UpperCAmelCase , 'a+b' ) as f:
yield f
_UpperCAmelCase = _resumable_file_manager
if os.path.exists(_UpperCAmelCase ):
_UpperCAmelCase = os.stat(_UpperCAmelCase ).st_size
else:
_UpperCAmelCase = 0
else:
_UpperCAmelCase = partial(tempfile.NamedTemporaryFile , dir=_UpperCAmelCase , delete=_UpperCAmelCase )
_UpperCAmelCase = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
'%s not found in cache or force_download set to True, downloading to %s' , _UpperCAmelCase , temp_file.name , )
http_get(
_UpperCAmelCase , _UpperCAmelCase , proxies=_UpperCAmelCase , resume_size=_UpperCAmelCase , user_agent=_UpperCAmelCase , )
os.replace(temp_file.name , _UpperCAmelCase )
_UpperCAmelCase = {'url': url, 'etag': etag}
_UpperCAmelCase = cache_path + '.json'
with open(_UpperCAmelCase , 'w' ) as meta_file:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
return cache_path
def A ( _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[Any]=None ) -> Any:
'''simple docstring'''
_UpperCAmelCase = url.encode('utf-8' )
_UpperCAmelCase = shaaaa(_UpperCAmelCase )
_UpperCAmelCase = url_hash.hexdigest()
if etag:
_UpperCAmelCase = etag.encode('utf-8' )
_UpperCAmelCase = shaaaa(_UpperCAmelCase )
filename += "." + etag_hash.hexdigest()
if url.endswith('.h5' ):
filename += ".h5"
return filename
def A ( _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : Tuple=False , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : str=False , _UpperCAmelCase : int=None , _UpperCAmelCase : Any=False , _UpperCAmelCase : Any=False , _UpperCAmelCase : str=False , ) -> List[str]:
'''simple docstring'''
if cache_dir is None:
_UpperCAmelCase = TRANSFORMERS_CACHE
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_UpperCAmelCase = str(_UpperCAmelCase )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_UpperCAmelCase = str(_UpperCAmelCase )
if is_remote_url(_UpperCAmelCase ):
# URL, so get it from the cache (downloading if necessary)
_UpperCAmelCase = get_from_cache(
_UpperCAmelCase , cache_dir=_UpperCAmelCase , force_download=_UpperCAmelCase , proxies=_UpperCAmelCase , resume_download=_UpperCAmelCase , user_agent=_UpperCAmelCase , local_files_only=_UpperCAmelCase , )
elif os.path.exists(_UpperCAmelCase ):
# File, and it exists.
_UpperCAmelCase = url_or_filename
elif urlparse(_UpperCAmelCase ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('file {} not found'.format(_UpperCAmelCase ) )
else:
# Something unknown
raise ValueError('unable to parse {} as a URL or as a local path'.format(_UpperCAmelCase ) )
if extract_compressed_file:
if not is_zipfile(_UpperCAmelCase ) and not tarfile.is_tarfile(_UpperCAmelCase ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
_UpperCAmelCase , _UpperCAmelCase = os.path.split(_UpperCAmelCase )
_UpperCAmelCase = output_file.replace('.' , '-' ) + '-extracted'
_UpperCAmelCase = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
if os.path.isdir(_UpperCAmelCase ) and os.listdir(_UpperCAmelCase ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
_UpperCAmelCase = output_path + '.lock'
with FileLock(_UpperCAmelCase ):
shutil.rmtree(_UpperCAmelCase , ignore_errors=_UpperCAmelCase )
os.makedirs(_UpperCAmelCase )
if is_zipfile(_UpperCAmelCase ):
with ZipFile(_UpperCAmelCase , 'r' ) as zip_file:
zip_file.extractall(_UpperCAmelCase )
zip_file.close()
elif tarfile.is_tarfile(_UpperCAmelCase ):
_UpperCAmelCase = tarfile.open(_UpperCAmelCase )
tar_file.extractall(_UpperCAmelCase )
tar_file.close()
else:
raise EnvironmentError('Archive format of {} could not be identified'.format(_UpperCAmelCase ) )
return output_path_extracted
return output_path
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any]="," ) -> Dict:
'''simple docstring'''
assert isinstance(_UpperCAmelCase , _UpperCAmelCase )
if os.path.isfile(_UpperCAmelCase ):
with open(_UpperCAmelCase ) as f:
_UpperCAmelCase = eval(f.read() )
else:
_UpperCAmelCase = requests.get(_UpperCAmelCase )
try:
_UpperCAmelCase = req.json()
except Exception:
_UpperCAmelCase = req.content.decode()
assert data is not None, "could not connect"
try:
_UpperCAmelCase = eval(_UpperCAmelCase )
except Exception:
_UpperCAmelCase = data.split('\n' )
req.close()
return data
def A ( _UpperCAmelCase : Optional[int] ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase = requests.get(_UpperCAmelCase )
_UpperCAmelCase = np.array(Image.open(BytesIO(response.content ) ) )
return img
def A ( _UpperCAmelCase : Optional[int] ) -> Any:
'''simple docstring'''
_UpperCAmelCase = url.split('/' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(_UpperCAmelCase )
with open(_UpperCAmelCase , 'rb' ) as stream:
_UpperCAmelCase = pkl.load(_UpperCAmelCase )
_UpperCAmelCase = weights.pop('model' )
_UpperCAmelCase = {}
for k, v in model.items():
_UpperCAmelCase = torch.from_numpy(_UpperCAmelCase )
if "running_var" in k:
_UpperCAmelCase = torch.tensor([0] )
_UpperCAmelCase = k.replace('running_var' , 'num_batches_tracked' )
_UpperCAmelCase = zero
return new
def A ( ) -> str:
'''simple docstring'''
print(F"{os.path.abspath(os.path.join(_UpperCAmelCase , os.pardir ) )}/demo.ipynb" )
def A ( _UpperCAmelCase : Any , _UpperCAmelCase : List[Any]="RGB" ) -> int:
'''simple docstring'''
assert isinstance(_UpperCAmelCase , _UpperCAmelCase )
if os.path.isfile(_UpperCAmelCase ):
_UpperCAmelCase = cva.imread(_UpperCAmelCase )
else:
_UpperCAmelCase = get_image_from_url(_UpperCAmelCase )
assert img is not None, F"could not connect to: {im}"
_UpperCAmelCase = cva.cvtColor(_UpperCAmelCase , cva.COLOR_BGR2RGB )
if input_format == "RGB":
_UpperCAmelCase = img[:, :, ::-1]
return img
def A ( _UpperCAmelCase : str , _UpperCAmelCase : List[Any]=1 ) -> List[Any]:
'''simple docstring'''
return (images[i : i + batch] for i in range(0 , len(_UpperCAmelCase ) , _UpperCAmelCase ))
| 339 |
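The cache-key scheme in the url_to_filename routine above — SHA-256 of the URL, optionally suffixed with a hash of the ETag so different revisions can coexist, with a trailing .h5 extension preserved — can be reproduced with the standard library alone. A minimal sketch under that reading; the function name and sample values are illustrative, not taken from the source:

import hashlib

def cache_filename(url, etag=None):
    # Hash the URL so arbitrary remote paths map to filesystem-safe names.
    name = hashlib.sha256(url.encode("utf-8")).hexdigest()
    if etag:
        # A second hash of the ETag lets different revisions of the same
        # URL coexist in the cache directory.
        name += "." + hashlib.sha256(etag.encode("utf-8")).hexdigest()
    if url.endswith(".h5"):
        name += ".h5"
    return name

print(cache_filename("https://example.com/model.bin", etag='"abc123"'))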
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
UpperCAmelCase__ = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
UpperCAmelCase__ = TaTokenizerFast
UpperCAmelCase__ = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"MT5EncoderModel",
"MT5ForConditionalGeneration",
"MT5ForQuestionAnswering",
"MT5Model",
"MT5PreTrainedModel",
"MT5Stack",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
UpperCAmelCase__ = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast},
module_spec=__spec__,
)
| 339 | 1 |
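The module above defers heavy imports through _LazyModule. The core trick is a module-level __getattr__ (PEP 562) that resolves an attribute to its backing module only on first access. A minimal sketch of that idea with illustrative names; the real _LazyModule in transformers also handles submodule routing, TYPE_CHECKING stubs, and dummy objects for missing backends:

# lazy_imports.py -- a minimal sketch of lazy imports via PEP 562.
import importlib

_import_structure = {"json": ["dumps", "loads"], "math": ["sqrt"]}
_attr_to_module = {
    attr: module for module, attrs in _import_structure.items() for attr in attrs
}

def __getattr__(name):
    module_name = _attr_to_module.get(name)
    if module_name is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    # Import the backing module only on first attribute access.
    return getattr(importlib.import_module(module_name), name)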
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCamelCase__ = 16
lowerCamelCase__ = 32
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase = 16 , _UpperCamelCase = "bert-base-cased" ):
__lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(_UpperCamelCase )
__lowerCAmelCase : str = load_dataset('glue' , 'mrpc' )
def tokenize_function(_UpperCamelCase ):
# max_length=None => use the model max length (it's actually the default)
__lowerCAmelCase : Dict = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_UpperCamelCase , max_length=_UpperCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__lowerCAmelCase : Union[str, Any] = datasets.map(
_UpperCamelCase , batched=_UpperCamelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=_UpperCamelCase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__lowerCAmelCase : List[str] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(_UpperCamelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_UpperCamelCase , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(_UpperCamelCase , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
__lowerCAmelCase : Dict = DataLoader(
tokenized_datasets['train'] , shuffle=_UpperCamelCase , collate_fn=_UpperCamelCase , batch_size=_UpperCamelCase )
__lowerCAmelCase : List[str] = DataLoader(
tokenized_datasets['validation'] , shuffle=_UpperCamelCase , collate_fn=_UpperCamelCase , batch_size=_UpperCamelCase )
return train_dataloader, eval_dataloader
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
# Initialize accelerator
__lowerCAmelCase : Any = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__lowerCAmelCase : List[str] = config['lr']
__lowerCAmelCase : Optional[Any] = int(config['num_epochs'] )
__lowerCAmelCase : int = int(config['seed'] )
__lowerCAmelCase : int = int(config['batch_size'] )
__lowerCAmelCase : List[Any] = args.model_name_or_path
set_seed(_UpperCamelCase )
__lowerCAmelCase : Any = get_dataloaders(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__lowerCAmelCase : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(_UpperCamelCase , return_dict=_UpperCamelCase )
# Instantiate optimizer
__lowerCAmelCase : Any = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__lowerCAmelCase : int = optimizer_cls(params=model.parameters() , lr=_UpperCamelCase )
if accelerator.state.deepspeed_plugin is not None:
__lowerCAmelCase : List[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
__lowerCAmelCase : Dict = 1
__lowerCAmelCase : List[Any] = (len(_UpperCamelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__lowerCAmelCase : int = get_linear_schedule_with_warmup(
optimizer=_UpperCamelCase , num_warmup_steps=0 , num_training_steps=_UpperCamelCase , )
else:
__lowerCAmelCase : str = DummyScheduler(_UpperCamelCase , total_num_steps=_UpperCamelCase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__lowerCAmelCase : Optional[int] = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# We need to keep track of how many total steps we have iterated over
__lowerCAmelCase : int = 0
# We also need to keep track of the starting epoch so files are named properly
__lowerCAmelCase : Tuple = 0
# Now we train the model
__lowerCAmelCase : Union[str, Any] = evaluate.load('glue' , 'mrpc' )
__lowerCAmelCase : Optional[Any] = 0
__lowerCAmelCase : List[Any] = {}
for epoch in range(_UpperCamelCase , _UpperCamelCase ):
model.train()
for step, batch in enumerate(_UpperCamelCase ):
__lowerCAmelCase : List[str] = model(**_UpperCamelCase )
__lowerCAmelCase : Optional[Any] = outputs.loss
__lowerCAmelCase : str = loss / gradient_accumulation_steps
accelerator.backward(_UpperCamelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
__lowerCAmelCase : Union[str, Any] = 0
for step, batch in enumerate(_UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__lowerCAmelCase : Any = model(**_UpperCamelCase )
__lowerCAmelCase : List[Any] = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
__lowerCAmelCase : int = accelerator.gather(
(predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(_UpperCamelCase ) - 1:
__lowerCAmelCase : int = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__lowerCAmelCase : Tuple = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=_UpperCamelCase , references=_UpperCamelCase , )
__lowerCAmelCase : List[str] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}:" , _UpperCamelCase )
__lowerCAmelCase : Dict = eval_metric['accuracy']
if best_performance < eval_metric["accuracy"]:
__lowerCAmelCase : Union[str, Any] = eval_metric['accuracy']
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), F"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , 'all_results.json' ) , 'w' ) as f:
json.dump(_UpperCamelCase , _UpperCamelCase )
def __lowerCAmelCase ():
__lowerCAmelCase : List[str] = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=_UpperCamelCase , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_UpperCamelCase , )
parser.add_argument(
'--output_dir' , type=_UpperCamelCase , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--performance_lower_bound' , type=_UpperCamelCase , default=_UpperCamelCase , help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.' , )
parser.add_argument(
'--num_epochs' , type=_UpperCamelCase , default=3 , help='Number of train epochs.' , )
__lowerCAmelCase : Optional[int] = parser.parse_args()
__lowerCAmelCase : Dict = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(_UpperCamelCase , _UpperCamelCase )
if __name__ == "__main__":
main() | 361 |
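The training loop above divides the loss by gradient_accumulation_steps and only steps the optimizer every N batches, emulating a larger effective batch size. A self-contained sketch of that pattern with a toy model; note it steps on (step + 1) % N == 0, a common variant of the step % N == 0 test used in the script:

import torch
from torch import nn

# Toy model and data so the accumulation pattern runs in isolation.
model = nn.Linear(4, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
loss_fn = nn.CrossEntropyLoss()
gradient_accumulation_steps = 4

batches = [(torch.randn(8, 4), torch.randint(0, 2, (8,))) for _ in range(8)]
for step, (x, y) in enumerate(batches):
    loss = loss_fn(model(x), y)
    # Scale the loss so the accumulated gradient matches one big batch.
    (loss / gradient_accumulation_steps).backward()
    if (step + 1) % gradient_accumulation_steps == 0:
        optimizer.step()
        optimizer.zero_grad()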
"""simple docstring"""
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {}
lowerCamelCase__ = {}
lowerCamelCase__ = {}
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , ):
__lowerCAmelCase : Union[str, Any] = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
F"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})" )
__lowerCAmelCase : str = formatter_cls
for alias in set(aliases + [format_type] ):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
F"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})" )
__lowerCAmelCase : Any = format_type
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None ):
__lowerCAmelCase : int = aliases if aliases is not None else []
for alias in set(aliases + [format_type] ):
__lowerCAmelCase : str = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["""python"""])
_register_formatter(ArrowFormatter, """arrow""", aliases=["""pa""", """pyarrow"""])
_register_formatter(NumpyFormatter, """numpy""", aliases=["""np"""])
_register_formatter(PandasFormatter, """pandas""", aliases=["""pd"""])
_register_formatter(CustomFormatter, """custom""")
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, """torch""", aliases=["""pt""", """pytorch"""])
else:
lowerCamelCase__ = ValueError("""PyTorch needs to be installed to be able to return PyTorch tensors.""")
_register_unavailable_formatter(_torch_error, """torch""", aliases=["""pt""", """pytorch"""])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, """tensorflow""", aliases=["""tf"""])
else:
lowerCamelCase__ = ValueError("""Tensorflow needs to be installed to be able to return Tensorflow tensors.""")
_register_unavailable_formatter(_tf_error, """tensorflow""", aliases=["""tf"""])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, """jax""", aliases=[])
else:
lowerCamelCase__ = ValueError("""JAX needs to be installed to be able to return JAX arrays.""")
_register_unavailable_formatter(_jax_error, """jax""", aliases=[])
def __lowerCAmelCase (_UpperCamelCase ):
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def __lowerCAmelCase (_UpperCamelCase , **_UpperCamelCase ):
__lowerCAmelCase : Any = get_format_type_from_alias(_UpperCamelCase )
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**_UpperCamelCase )
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
F"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got '{format_type}'" ) | 182 | 0 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple ):
__UpperCamelCase , __UpperCamelCase =[], []
while len(SCREAMING_SNAKE_CASE__ ) > 1:
__UpperCamelCase , __UpperCamelCase =min(SCREAMING_SNAKE_CASE__ ), max(SCREAMING_SNAKE_CASE__ )
start.append(SCREAMING_SNAKE_CASE__ )
end.append(SCREAMING_SNAKE_CASE__ )
collection.remove(SCREAMING_SNAKE_CASE__ )
collection.remove(SCREAMING_SNAKE_CASE__ )
end.reverse()
return start + collection + end
if __name__ == "__main__":
_A = input('Enter numbers separated by a comma:\n').strip()
_A = [int(item) for item in user_input.split(',')]
print(*merge_sort(unsorted), sep=',')
| 62 |
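With the mangled assignments restored, the routine above is a min/max ("double-ended selection") sort rather than a merge sort: it repeatedly peels off the current minimum and maximum, so list.remove makes it O(n^2). A hedged reconstruction with illustrative variable names:

def minmax_sort(collection):
    # Collect minima in order and maxima in reverse; any single element
    # left over from an odd-length input stays in the middle.
    start, end = [], []
    while len(collection) > 1:
        mn, mx = min(collection), max(collection)
        start.append(mn)
        end.append(mx)
        collection.remove(mn)
        collection.remove(mx)
    end.reverse()
    return start + collection + end

assert minmax_sort([5, 1, 4, 2, 3]) == [1, 2, 3, 4, 5]
assert minmax_sort([2, 2, 1]) == [1, 2, 2]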
from __future__ import annotations
import csv
import requests
from bsa import BeautifulSoup
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str = "" ):
__UpperCamelCase =url or 'https://www.imdb.com/chart/top/?ref_=nv_mv_250'
__UpperCamelCase =BeautifulSoup(requests.get(SCREAMING_SNAKE_CASE__ ).text , 'html.parser' )
__UpperCamelCase =soup.find_all('td' , attrs='titleColumn' )
__UpperCamelCase =soup.find_all('td' , class_='ratingColumn imdbRating' )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
}
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str = "IMDb_Top_250_Movies.csv" ):
__UpperCamelCase =get_imdb_top_aaa_movies()
with open(SCREAMING_SNAKE_CASE__ , 'w' , newline='' ) as out_file:
__UpperCamelCase =csv.writer(SCREAMING_SNAKE_CASE__ )
writer.writerow(['Movie title', 'IMDb rating'] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
| 62 | 1 |
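The CSV step above writes the scraped {title: rating} mapping row by row with csv.writer; the same output can be produced with csv.DictWriter, which ties each value to a named column. A sketch with toy ratings in place of live scraping:

import csv

# Toy data standing in for the scraped {title: rating} mapping.
movies = {"The Shawshank Redemption": 9.3, "The Godfather": 9.2}

with open("IMDb_Top_250_Movies.csv", "w", newline="") as out_file:
    writer = csv.DictWriter(out_file, fieldnames=["Movie title", "IMDb rating"])
    writer.writeheader()
    for title, rating in movies.items():
        writer.writerow({"Movie title": title, "IMDb rating": rating})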
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = 10
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = [1, 2, 3, 4]
UpperCAmelCase_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0) , _snake_case)
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0) , _snake_case)
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0) , _snake_case)
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this.'''
UpperCAmelCase_ , UpperCAmelCase_ = process_story(_snake_case)
self.assertEqual(_snake_case , [])
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = ''''''
UpperCAmelCase_ , UpperCAmelCase_ = process_story(_snake_case)
self.assertEqual(_snake_case , [])
self.assertEqual(_snake_case , [])
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = (
'''It was the year of Our Lord one thousand seven hundred and '''
'''seventy-five\n\nSpiritual revelations were conceded to England '''
'''at that favoured period, as at this.\n@highlight\n\nIt was the best of times'''
)
UpperCAmelCase_ , UpperCAmelCase_ = process_story(_snake_case)
UpperCAmelCase_ = [
'''It was the year of Our Lord one thousand seven hundred and seventy-five.''',
'''Spiritual revelations were conceded to England at that favoured period, as at this.''',
]
self.assertEqual(_snake_case , _snake_case)
UpperCAmelCase_ = ['''It was the best of times.''']
self.assertEqual(_snake_case , _snake_case)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = torch.tensor([1, 2, 3, 4])
UpperCAmelCase_ = torch.tensor([1, 1, 1, 1])
np.testing.assert_array_equal(build_mask(_snake_case , 0).numpy() , expected.numpy())
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = torch.tensor([1, 2, 3, 4, 23, 23, 23])
UpperCAmelCase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0])
np.testing.assert_array_equal(build_mask(_snake_case , 23).numpy() , expected.numpy())
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = torch.tensor([8, 2, 3, 4, 1, 1, 1])
UpperCAmelCase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0])
np.testing.assert_array_equal(build_mask(_snake_case , 1).numpy() , expected.numpy())
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = 101
UpperCAmelCase_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
UpperCAmelCase_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])
UpperCAmelCase_ = compute_token_type_ids(_snake_case , _snake_case)
np.testing.assert_array_equal(_snake_case , _snake_case)
| 7 |
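Reconstructed from the assertions above, the helpers under test behave as follows: truncate long inputs or right-pad them with the pad id, and mask out every position holding the pad id. This is a hedged reference implementation inferred from the tests; the real utils_summarization may differ in details:

import torch

def truncate_or_pad(sequence, block_size, pad_token_id):
    # Truncate long inputs; right-pad short ones with the pad id.
    if len(sequence) > block_size:
        return sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))

def build_mask(sequence, pad_token_id):
    # 1 for real tokens, 0 wherever the pad id occurs.
    mask = torch.ones_like(sequence)
    mask[sequence == pad_token_id] = 0
    return mask

assert truncate_or_pad([1, 2, 3, 4], 10, 0) == [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
assert build_mask(torch.tensor([1, 2, 3, 4, 23, 23, 23]), 23).tolist() == [1, 1, 1, 1, 0, 0, 0]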
from maths.prime_factors import prime_factors
def A (__A : int ) -> int:
"""simple docstring"""
if not isinstance(__A , __A ):
UpperCAmelCase_ = F"""Input value of [number={number}] must be an integer"""
raise TypeError(__A )
if number < 1:
raise ValueError('''Input must be a positive integer''' )
return -1 if len(prime_factors(__A ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 7 | 1 |
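The sign rule above computes the Liouville function: lambda(n) = (-1) ** Omega(n), where Omega counts prime factors with multiplicity, as returned by maths.prime_factors. A self-contained check, with a trial-division factorizer standing in for that import:

def prime_factors(n):
    # Trial division, returning factors with multiplicity, e.g. 12 -> [2, 2, 3].
    factors, d = [], 2
    while d * d <= n:
        while n % d == 0:
            factors.append(d)
            n //= d
        d += 1
    if n > 1:
        factors.append(n)
    return factors

def liouville_lambda(n):
    return -1 if len(prime_factors(n)) % 2 else 1

assert liouville_lambda(12) == -1  # Omega(12) = 3: factors 2, 2, 3
assert liouville_lambda(10) == 1   # Omega(10) = 2: factors 2, 5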
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def __a ( SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase = image.size
__UpperCAmelCase , __UpperCAmelCase = (x - x % 3_2 for x in (w, h)) # resize to integer multiple of 32
__UpperCAmelCase = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] )
__UpperCAmelCase = np.array(SCREAMING_SNAKE_CASE ).astype(np.floataa ) / 255.0
__UpperCAmelCase = image[None].transpose(0 , 3 , 1 , 2 )
__UpperCAmelCase = torch.from_numpy(SCREAMING_SNAKE_CASE )
return 2.0 * image - 1.0
class A_ ( _a ):
'''simple docstring'''
def __init__(self , lowercase__ , lowercase__ , lowercase__ , ) -> Dict:
super().__init__()
self.register_modules(vqvae=lowercase__ , unet=lowercase__ , scheduler=lowercase__ )
@torch.no_grad()
def __call__(self , lowercase__ = None , lowercase__ = 1 , lowercase__ = 100 , lowercase__ = 0.0 , lowercase__ = None , lowercase__ = "pil" , lowercase__ = True , ) -> Union[Tuple, ImagePipelineOutput]:
if isinstance(lowercase__ , PIL.Image.Image ):
__UpperCAmelCase = 1
elif isinstance(lowercase__ , torch.Tensor ):
__UpperCAmelCase = image.shape[0]
else:
raise ValueError(F'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(lowercase__ )}''' )
if isinstance(lowercase__ , PIL.Image.Image ):
__UpperCAmelCase = preprocess(lowercase__ )
__UpperCAmelCase , __UpperCAmelCase = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
__UpperCAmelCase = (batch_size, self.unet.config.in_channels // 2, height, width)
__UpperCAmelCase = next(self.unet.parameters() ).dtype
__UpperCAmelCase = randn_tensor(lowercase__ , generator=lowercase__ , device=self.device , dtype=lowercase__ )
__UpperCAmelCase = image.to(device=self.device , dtype=lowercase__ )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(lowercase__ , device=self.device )
__UpperCAmelCase = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
__UpperCAmelCase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__UpperCAmelCase = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__UpperCAmelCase = {}
if accepts_eta:
__UpperCAmelCase = eta
for t in self.progress_bar(lowercase__ ):
# concat latents and low resolution image in the channel dimension.
__UpperCAmelCase = torch.cat([latents, image] , dim=1 )
__UpperCAmelCase = self.scheduler.scale_model_input(lowercase__ , lowercase__ )
# predict the noise residual
__UpperCAmelCase = self.unet(lowercase__ , lowercase__ ).sample
# compute the previous noisy sample x_t -> x_t-1
__UpperCAmelCase = self.scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
# decode the image latents with the VQVAE
__UpperCAmelCase = self.vqvae.decode(lowercase__ ).sample
__UpperCAmelCase = torch.clamp(lowercase__ , -1.0 , 1.0 )
__UpperCAmelCase = image / 2 + 0.5
__UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__UpperCAmelCase = self.numpy_to_pil(lowercase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase__ )
| 333 |
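The preprocess helper above maps a PIL image to a [-1, 1] NCHW tensor after snapping both sides down to a multiple of 32. A standalone version with a shape check; PIL.Image.LANCZOS is assumed as the resampling constant (newer Pillow also exposes it as Image.Resampling.LANCZOS):

import numpy as np
import PIL.Image
import torch

def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # snap down to a multiple of 32
    image = image.resize((w, h), resample=PIL.Image.LANCZOS)
    arr = np.array(image).astype(np.float32) / 255.0  # [0, 255] -> [0, 1]
    arr = arr[None].transpose(0, 3, 1, 2)             # NHWC -> NCHW
    return 2.0 * torch.from_numpy(arr) - 1.0          # [0, 1] -> [-1, 1]

img = PIL.Image.new("RGB", (130, 70), color=(128, 64, 32))
t = preprocess(img)
assert t.shape == (1, 3, 64, 128) and t.min() >= -1.0 and t.max() <= 1.0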
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
A_ : Tuple = logging.get_logger(__name__)
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
'''simple docstring'''
__UpperCAmelCase = b.T
__UpperCAmelCase = np.sum(np.square(SCREAMING_SNAKE_CASE ) , axis=1 )
__UpperCAmelCase = np.sum(np.square(SCREAMING_SNAKE_CASE ) , axis=0 )
__UpperCAmelCase = np.matmul(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__UpperCAmelCase = aa[:, None] - 2 * ab + ba[None, :]
return d
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
__UpperCAmelCase = x.reshape(-1 , 3 )
__UpperCAmelCase = squared_euclidean_distance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return np.argmin(SCREAMING_SNAKE_CASE , axis=1 )
class A_ ( _a ):
'''simple docstring'''
a__ = ["pixel_values"]
def __init__(self , lowercase__ = None , lowercase__ = True , lowercase__ = None , lowercase__ = PILImageResampling.BILINEAR , lowercase__ = True , lowercase__ = True , **lowercase__ , ) -> None:
super().__init__(**lowercase__ )
__UpperCAmelCase = size if size is not None else {'''height''': 256, '''width''': 256}
__UpperCAmelCase = get_size_dict(lowercase__ )
__UpperCAmelCase = np.array(lowercase__ ) if clusters is not None else None
__UpperCAmelCase = do_resize
__UpperCAmelCase = size
__UpperCAmelCase = resample
__UpperCAmelCase = do_normalize
__UpperCAmelCase = do_color_quantize
def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ = PILImageResampling.BILINEAR , lowercase__ = None , **lowercase__ , ) -> np.ndarray:
__UpperCAmelCase = get_size_dict(lowercase__ )
if "height" not in size or "width" not in size:
raise ValueError(F'''Size dictionary must contain both height and width keys. Got {size.keys()}''' )
return resize(
lowercase__ , size=(size['''height'''], size['''width''']) , resample=lowercase__ , data_format=lowercase__ , **lowercase__ )
def lowerCAmelCase_ (self , lowercase__ , lowercase__ = None , ) -> np.ndarray:
__UpperCAmelCase = rescale(image=lowercase__ , scale=1 / 127.5 , data_format=lowercase__ )
__UpperCAmelCase = image - 1
return image
def lowerCAmelCase_ (self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = ChannelDimension.FIRST , **lowercase__ , ) -> PIL.Image.Image:
__UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
__UpperCAmelCase = size if size is not None else self.size
__UpperCAmelCase = get_size_dict(lowercase__ )
__UpperCAmelCase = resample if resample is not None else self.resample
__UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
__UpperCAmelCase = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
__UpperCAmelCase = clusters if clusters is not None else self.clusters
__UpperCAmelCase = np.array(lowercase__ )
__UpperCAmelCase = make_list_of_images(lowercase__ )
if not valid_images(lowercase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_color_quantize and clusters is None:
raise ValueError('''Clusters must be specified if do_color_quantize is True.''' )
# All transformations expect numpy arrays.
__UpperCAmelCase = [to_numpy_array(lowercase__ ) for image in images]
if do_resize:
__UpperCAmelCase = [self.resize(image=lowercase__ , size=lowercase__ , resample=lowercase__ ) for image in images]
if do_normalize:
__UpperCAmelCase = [self.normalize(image=lowercase__ ) for image in images]
if do_color_quantize:
__UpperCAmelCase = [to_channel_dimension_format(lowercase__ , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
__UpperCAmelCase = np.array(lowercase__ )
__UpperCAmelCase = color_quantize(lowercase__ , lowercase__ ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
__UpperCAmelCase = images.shape[0]
__UpperCAmelCase = images.reshape(lowercase__ , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
__UpperCAmelCase = list(lowercase__ )
else:
__UpperCAmelCase = [to_channel_dimension_format(lowercase__ , lowercase__ ) for image in images]
__UpperCAmelCase = {'''input_ids''': images}
return BatchFeature(data=lowercase__ , tensor_type=lowercase__ )
| 333 | 1 |
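squared_euclidean_distance above relies on the expansion ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2 to get all pairwise distances with a single matrix product. A quick numpy check of that identity against a brute-force computation:

import numpy as np

def squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)   # ||a_i||^2 for each row of a
    b2 = np.sum(np.square(b), axis=0)   # ||b_j||^2 for each row of b
    ab = np.matmul(a, b)                # all pairwise dot products
    return a2[:, None] - 2 * ab + b2[None, :]

rng = np.random.default_rng(0)
a, b = rng.normal(size=(5, 3)), rng.normal(size=(4, 3))
brute = ((a[:, None, :] - b[None, :, :]) ** 2).sum(-1)
assert np.allclose(squared_euclidean_distance(a, b), brute)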
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
A_ : Any = logging.getLogger(__name__)
@dataclass
class lowercase :
"""simple docstring"""
UpperCAmelCase = 42
UpperCAmelCase = 42
UpperCAmelCase = 42
@dataclass
class lowercase :
"""simple docstring"""
UpperCAmelCase = 42
UpperCAmelCase = 42
UpperCAmelCase = None
UpperCAmelCase = None
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = """train"""
UpperCAmelCase = """dev"""
UpperCAmelCase = """test"""
class lowercase :
"""simple docstring"""
@staticmethod
def _snake_case ( a_ ,a_ ) -> List[InputExample]:
raise NotImplementedError
@staticmethod
def _snake_case ( a_ ) -> List[str]:
raise NotImplementedError
@staticmethod
def _snake_case ( a_ ,a_ ,a_ ,a_ ,a_=False ,a_="[CLS]" ,a_=1 ,a_="[SEP]" ,a_=False ,a_=False ,a_=0 ,a_=0 ,a_=-100 ,a_=0 ,a_=True ,) -> List[InputFeatures]:
_UpperCAmelCase : List[Any] = {label: i for i, label in enumerate(a_ )}
_UpperCAmelCase : str = []
for ex_index, example in enumerate(a_ ):
if ex_index % 10_000 == 0:
logger.info("""Writing example %d of %d""" ,a_ ,len(a_ ) )
_UpperCAmelCase : Union[str, Any] = []
_UpperCAmelCase : List[Any] = []
for word, label in zip(example.words ,example.labels ):
_UpperCAmelCase : int = tokenizer.tokenize(a_ )
# bert-base-multilingual-cased sometimes outputs "nothing" ([]) when calling tokenize with just a space.
if len(a_ ) > 0:
tokens.extend(a_ )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(a_ ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
_UpperCAmelCase : Union[str, Any] = tokenizer.num_special_tokens_to_add()
if len(a_ ) > max_seq_length - special_tokens_count:
_UpperCAmelCase : Optional[int] = tokens[: (max_seq_length - special_tokens_count)]
_UpperCAmelCase : str = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
_UpperCAmelCase : str = [sequence_a_segment_id] * len(a_ )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
_UpperCAmelCase : Optional[int] = [cls_token] + tokens
_UpperCAmelCase : List[str] = [pad_token_label_id] + label_ids
_UpperCAmelCase : str = [cls_token_segment_id] + segment_ids
_UpperCAmelCase : Optional[int] = tokenizer.convert_tokens_to_ids(a_ )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
_UpperCAmelCase : Optional[Any] = [1 if mask_padding_with_zero else 0] * len(a_ )
# Zero-pad up to the sequence length.
_UpperCAmelCase : Optional[Any] = max_seq_length - len(a_ )
if pad_on_left:
_UpperCAmelCase : Optional[int] = ([pad_token] * padding_length) + input_ids
_UpperCAmelCase : Optional[Any] = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
_UpperCAmelCase : List[str] = ([pad_token_segment_id] * padding_length) + segment_ids
_UpperCAmelCase : str = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(a_ ) == max_seq_length
assert len(a_ ) == max_seq_length
assert len(a_ ) == max_seq_length
assert len(a_ ) == max_seq_length
if ex_index < 5:
logger.info("""*** Example ***""" )
logger.info("""guid: %s""" ,example.guid )
logger.info("""tokens: %s""" ,""" """.join([str(a_ ) for x in tokens] ) )
logger.info("""input_ids: %s""" ,""" """.join([str(a_ ) for x in input_ids] ) )
logger.info("""input_mask: %s""" ,""" """.join([str(a_ ) for x in input_mask] ) )
logger.info("""segment_ids: %s""" ,""" """.join([str(a_ ) for x in segment_ids] ) )
logger.info("""label_ids: %s""" ,""" """.join([str(a_ ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
_UpperCAmelCase : Tuple = None
features.append(
InputFeatures(
input_ids=a_ ,attention_mask=a_ ,token_type_ids=a_ ,label_ids=a_ ) )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = 42
UpperCAmelCase = nn.CrossEntropyLoss().ignore_index
def __init__( self ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ = None ,a_=False ,a_ = Split.train ,) -> List[Any]:
# Load data features from cache or dataset file
_UpperCAmelCase : List[str] = os.path.join(
a_ ,"""cached_{}_{}_{}""".format(mode.value ,tokenizer.__class__.__name__ ,str(a_ ) ) ,)
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_UpperCAmelCase : Optional[Any] = cached_features_file + """.lock"""
with FileLock(a_ ):
if os.path.exists(a_ ) and not overwrite_cache:
logger.info(f'''Loading features from cached file {cached_features_file}''' )
_UpperCAmelCase : Optional[Any] = torch.load(a_ )
else:
logger.info(f'''Creating features from dataset file at {data_dir}''' )
_UpperCAmelCase : Any = token_classification_task.read_examples_from_file(a_ ,a_ )
# TODO clean up all this to leverage built-in features of tokenizers
_UpperCAmelCase : Optional[int] = token_classification_task.convert_examples_to_features(
a_ ,a_ ,a_ ,a_ ,cls_token_at_end=bool(model_type in ["""xlnet"""] ) ,cls_token=tokenizer.cls_token ,cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 ,sep_token=tokenizer.sep_token ,sep_token_extra=a_ ,pad_on_left=bool(tokenizer.padding_side == """left""" ) ,pad_token=tokenizer.pad_token_id ,pad_token_segment_id=tokenizer.pad_token_type_id ,pad_token_label_id=self.pad_token_label_id ,)
logger.info(f'''Saving features into cached file {cached_features_file}''' )
torch.save(self.features ,a_ )
def __len__( self ) -> Union[str, Any]:
return len(self.features )
def __getitem__( self ,a_ ) -> InputFeatures:
return self.features[i]
if is_tf_available():
import tensorflow as tf
class lowercase :
"""simple docstring"""
UpperCAmelCase = 42
UpperCAmelCase = -100
def __init__( self ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ = None ,a_=False ,a_ = Split.train ,) -> Dict:
_UpperCAmelCase : Tuple = token_classification_task.read_examples_from_file(a_ ,a_ )
# TODO clean up all this to leverage built-in features of tokenizers
_UpperCAmelCase : str = token_classification_task.convert_examples_to_features(
a_ ,a_ ,a_ ,a_ ,cls_token_at_end=bool(model_type in ["""xlnet"""] ) ,cls_token=tokenizer.cls_token ,cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 ,sep_token=tokenizer.sep_token ,sep_token_extra=a_ ,pad_on_left=bool(tokenizer.padding_side == """left""" ) ,pad_token=tokenizer.pad_token_id ,pad_token_segment_id=tokenizer.pad_token_type_id ,pad_token_label_id=self.pad_token_label_id ,)
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
_UpperCAmelCase : List[Any] = tf.data.Dataset.from_generator(
a_ ,({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa}, tf.intaa) ,(
{"""input_ids""": tf.TensorShape([None] ), """attention_mask""": tf.TensorShape([None] )},
tf.TensorShape([None] ),
) ,)
else:
_UpperCAmelCase : Optional[Any] = tf.data.Dataset.from_generator(
a_ ,({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa, """token_type_ids""": tf.intaa}, tf.intaa) ,(
{
"""input_ids""": tf.TensorShape([None] ),
"""attention_mask""": tf.TensorShape([None] ),
"""token_type_ids""": tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) ,)
def _snake_case ( self ) -> int:
_UpperCAmelCase : Tuple = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self ) -> Union[str, Any]:
return len(self.features )
def __getitem__( self ,a_ ) -> InputFeatures:
return self.features[i]
| 349 |
'''simple docstring'''
from datetime import datetime
import requests
def snake_case_ ( lowerCAmelCase_ )-> bytes:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = """https://downloadgram.net/wp-json/wppress/video-downloader/video?url="""
_UpperCAmelCase : Dict = requests.get(base_url + url ).json()[0]["""urls"""][0]["""src"""]
return requests.get(lowerCAmelCase_ ).content
if __name__ == "__main__":
A_ : Union[str, Any] = input("""Enter Video/IGTV url: """).strip()
A_ : Dict = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, """wb""") as fp:
fp.write(download_video(url))
print(f"""Done. Video saved to disk as {file_name}.""")
| 349 | 1 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : Any ):
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
lowerCAmelCase__ : List[str] = [[1, 2, 4], [1, 2, 3, 4]]
lowerCAmelCase__ : Optional[int] = DisjunctiveConstraint(lowercase_ )
self.assertTrue(isinstance(dc.token_ids ,lowercase_ ) )
with self.assertRaises(lowercase_ ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(lowercase_ ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def __lowerCAmelCase ( self : Tuple ):
# We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
lowerCAmelCase__ : List[str] = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(lowercase_ ):
DisjunctiveConstraint(lowercase_ ) # fails here
def __lowerCAmelCase ( self : Optional[int] ):
lowerCAmelCase__ : str = [[1, 2, 3], [1, 2, 4]]
lowerCAmelCase__ : Optional[int] = DisjunctiveConstraint(lowercase_ )
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Optional[int] = dc.update(1 )
lowerCAmelCase__ : int = stepped is True and completed is False and reset is False
self.assertTrue(lowercase_ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Optional[Any] = dc.update(2 )
lowerCAmelCase__ : Optional[Any] = stepped is True and completed is False and reset is False
self.assertTrue(lowercase_ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Union[str, Any] = dc.update(3 )
lowerCAmelCase__ : Any = stepped is True and completed is True and reset is False
self.assertTrue(lowercase_ )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def __lowerCAmelCase ( self : Union[str, Any] ):
lowerCAmelCase__ : Optional[int] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
lowerCAmelCase__ : List[Any] = DisjunctiveConstraint(lowercase_ )
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Any = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Optional[Any] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Any = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Dict = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Optional[int] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Dict = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : List[str] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
| 106 |
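The rule discussed in the test comments — no constraint sequence may be a complete subset (prefix) of another — can be checked in isolation. An illustrative helper only; the real DisjunctiveConstraint builds a trie and additionally tracks generation progress token by token:

def has_subset_conflict(nested_token_ids):
    # True when one sequence is a strict prefix of another, which would
    # make "constraint fulfilled" ambiguous during generation.
    seqs = [tuple(s) for s in nested_token_ids]
    return any(
        a != b and a == b[: len(a)]
        for a in seqs
        for b in seqs
    )

assert has_subset_conflict([[1, 2], [1, 2, 3, 4]])       # rejected input
assert not has_subset_conflict([[1, 2, 3], [1, 2, 4]])   # accepted input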
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ = {"""configuration_fnet""": ["""FNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""FNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""FNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""FNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FNetForMaskedLM""",
"""FNetForMultipleChoice""",
"""FNetForNextSentencePrediction""",
"""FNetForPreTraining""",
"""FNetForQuestionAnswering""",
"""FNetForSequenceClassification""",
"""FNetForTokenClassification""",
"""FNetLayer""",
"""FNetModel""",
"""FNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 86 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
lowercase_ = StableDiffusionLDMaDPipeline
lowercase_ = TEXT_TO_IMAGE_PARAMS
lowercase_ = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
def lowerCAmelCase_ ( self : str ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
SCREAMING_SNAKE_CASE_ = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=_a , set_alpha_to_one=_a , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
SCREAMING_SNAKE_CASE_ = CLIPTextModel(_a )
SCREAMING_SNAKE_CASE_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE_ = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def lowerCAmelCase_ ( self : List[str] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple=0 ):
if str(_a ).startswith('mps' ):
SCREAMING_SNAKE_CASE_ = torch.manual_seed(_a )
else:
SCREAMING_SNAKE_CASE_ = torch.Generator(device=_a ).manual_seed(_a )
SCREAMING_SNAKE_CASE_ = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def lowerCAmelCase_ ( self : Tuple ):
SCREAMING_SNAKE_CASE_ = "cpu" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_ = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ = StableDiffusionLDMaDPipeline(**_a )
SCREAMING_SNAKE_CASE_ = ldmad_pipe.to(_a )
ldmad_pipe.set_progress_bar_config(disable=_a )
SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs(_a )
SCREAMING_SNAKE_CASE_ = ldmad_pipe(**_a )
SCREAMING_SNAKE_CASE_ = output.rgb, output.depth
SCREAMING_SNAKE_CASE_ = rgb[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
SCREAMING_SNAKE_CASE_ = np.array(
[0.3733_8176, 0.7_0247, 0.7420_3193, 0.5164_3604, 0.5825_6793, 0.6093_2136, 0.418_1095, 0.4835_5877, 0.4653_5262] )
SCREAMING_SNAKE_CASE_ = np.array([103.46_727, 85.812_004, 87.849_236] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1E-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1E-2
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ = StableDiffusionLDMaDPipeline(**_a )
SCREAMING_SNAKE_CASE_ = ldmad_pipe.to(_a )
ldmad_pipe.set_progress_bar_config(disable=_a )
SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs(_a )
SCREAMING_SNAKE_CASE_ = 3 * [inputs["prompt"]]
# forward
SCREAMING_SNAKE_CASE_ = ldmad_pipe(**_a )
SCREAMING_SNAKE_CASE_ = output.rgb, output.depth
SCREAMING_SNAKE_CASE_ = rgb_slice_a[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ = depth_slice_a[0, -3:, -1]
SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs(_a )
SCREAMING_SNAKE_CASE_ = 3 * [inputs.pop('prompt' )]
SCREAMING_SNAKE_CASE_ = ldmad_pipe.tokenizer(
_a , padding='max_length' , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=_a , return_tensors='pt' , )
SCREAMING_SNAKE_CASE_ = text_inputs["input_ids"].to(_a )
SCREAMING_SNAKE_CASE_ = ldmad_pipe.text_encoder(_a )[0]
SCREAMING_SNAKE_CASE_ = prompt_embeds
# forward
SCREAMING_SNAKE_CASE_ = ldmad_pipe(**_a )
SCREAMING_SNAKE_CASE_ = output.rgb, output.depth
SCREAMING_SNAKE_CASE_ = rgb_slice_a[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ = depth_slice_a[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1E-4
assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1E-4
def lowerCAmelCase_ ( self : List[str] ):
SCREAMING_SNAKE_CASE_ = "cpu" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_ = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ = PNDMScheduler(skip_prk_steps=_a )
SCREAMING_SNAKE_CASE_ = StableDiffusionLDMaDPipeline(**_a )
SCREAMING_SNAKE_CASE_ = ldmad_pipe.to(_a )
ldmad_pipe.set_progress_bar_config(disable=_a )
SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs(_a )
SCREAMING_SNAKE_CASE_ = "french fries"
SCREAMING_SNAKE_CASE_ = ldmad_pipe(**_a , negative_prompt=_a )
SCREAMING_SNAKE_CASE_ = output.rgb, output.depth
SCREAMING_SNAKE_CASE_ = rgb[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
SCREAMING_SNAKE_CASE_ = np.array(
[0.3_7044, 0.7181_1503, 0.722_3251, 0.4860_3675, 0.563_8391, 0.636_4948, 0.4283_3704, 0.490_1315, 0.4792_6217] )
SCREAMING_SNAKE_CASE_ = np.array([107.84_738, 84.62_802, 89.962_135] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1E-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1E-2
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : List[str] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self : List[str] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str]="cpu" , _lowerCAmelCase : int=torch.floataa , _lowerCAmelCase : Tuple=0 ):
SCREAMING_SNAKE_CASE_ = torch.Generator(device=_a ).manual_seed(_a )
SCREAMING_SNAKE_CASE_ = np.random.RandomState(_a ).standard_normal((1, 4, 64, 64) )
SCREAMING_SNAKE_CASE_ = torch.from_numpy(_a ).to(device=_a , dtype=_a )
SCREAMING_SNAKE_CASE_ = {
"prompt": "a photograph of an astronaut riding a horse",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def lowerCAmelCase_ ( self : int ):
SCREAMING_SNAKE_CASE_ = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d' )
SCREAMING_SNAKE_CASE_ = ldmad_pipe.to(_a )
ldmad_pipe.set_progress_bar_config(disable=_a )
SCREAMING_SNAKE_CASE_ = self.get_inputs(_a )
SCREAMING_SNAKE_CASE_ = ldmad_pipe(**_a )
SCREAMING_SNAKE_CASE_ = output.rgb, output.depth
SCREAMING_SNAKE_CASE_ = rgb[0, -3:, -3:, -1].flatten()
SCREAMING_SNAKE_CASE_ = depth[0, -3:, -1].flatten()
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512)
SCREAMING_SNAKE_CASE_ = np.array(
[0.5380_5465, 0.5670_7305, 0.548_6515, 0.5701_2236, 0.581_4511, 0.5625_3487, 0.5484_3014, 0.5509_2263, 0.645_9706] )
SCREAMING_SNAKE_CASE_ = np.array(
[0.926_3781, 0.667_8672, 0.548_6515, 0.9220_2145, 0.6783_1135, 0.5625_3487, 0.924_1694, 0.755_1478, 0.645_9706] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3E-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3E-3
@nightly
@require_torch_gpu
class StableDiffusionLDMaDPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d(self):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d").to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.495586
        expected_rgb_std = 0.33795515
        expected_depth_mean = 112.48518
        expected_depth_std = 98.489746

        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3

    def test_ldm3d_v2(self):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d-4c").to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.4194127
        expected_rgb_std = 0.35375586
        expected_depth_mean = 0.5638502
        expected_depth_std = 0.34686103

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3 | 370 |
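# Standalone sketch of the pipeline exercised by the tests above. The checkpoint
# name is taken from the tests; the device handling and output unpacking are
# assumptions about the diffusers API on the host machine, so this is left
# commented out rather than presented as a definitive recipe:
# import torch
# from diffusers import StableDiffusionLDMaDPipeline
#
# pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d")
# pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
# out = pipe("a photograph of an astronaut riding a horse", num_inference_steps=3, output_type="numpy")
# rgb, depth = out.rgb, out.depth  # shapes (1, 512, 512, 3) and (1, 512, 512) per the checks above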
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace('img_encoder.pos_embed', 'vision_model.embeddings.position_embeddings')
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace('img_encoder.patch_embed.proj', 'vision_model.embeddings.patch_embeddings.projection')
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace('img_encoder.patch_embed.norm', 'vision_model.embeddings.layernorm')
    if "img_encoder.layers" in name:
        name = name.replace('img_encoder.layers', 'vision_model.encoder.stages')
    if "blocks" in name and "res" not in name:
        name = name.replace('blocks', 'layers')
    if "attn" in name and "pre_assign" not in name:
        name = name.replace('attn', 'self_attn')
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace('proj', 'out_proj')
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace('pre_assign_attn.attn.proj', 'pre_assign_attn.attn.out_proj')
    if "norm1" in name:
        name = name.replace('norm1', 'layer_norm1')
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace('norm2', 'layer_norm2')
    if "img_encoder.norm" in name:
        name = name.replace('img_encoder.norm', 'vision_model.layernorm')
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace('text_encoder.token_embedding', 'text_model.embeddings.token_embedding')
    if "text_encoder.positional_embedding" in name:
        name = name.replace('text_encoder.positional_embedding', 'text_model.embeddings.position_embedding.weight')
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace('text_encoder.transformer.resblocks.', 'text_model.encoder.layers.')
    if "ln_1" in name:
        name = name.replace('ln_1', 'layer_norm1')
    if "ln_2" in name:
        name = name.replace('ln_2', 'layer_norm2')
    if "c_fc" in name:
        name = name.replace('c_fc', 'fc1')
    if "c_proj" in name:
        name = name.replace('c_proj', 'fc2')
    if "text_encoder" in name:
        name = name.replace('text_encoder', 'text_model')
    if "ln_final" in name:
        name = name.replace('ln_final', 'final_layer_norm')
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace('img_projector.linear_hidden.', 'visual_projection.')
    if "img_projector.linear_out." in name:
        name = name.replace('img_projector.linear_out.', 'visual_projection.3.')
    if "text_projector.linear_hidden" in name:
        name = name.replace('text_projector.linear_hidden', 'text_projection')
    if "text_projector.linear_out" in name:
        name = name.replace('text_projector.linear_out', 'text_projection.3')
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            # (target key names are derived from the rename table in rename_key above)
            key_split = key.split('.')
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            prefix = f'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn'
            if "weight" in key:
                orig_state_dict[f'{prefix}.q_proj.weight'] = val[:dim, :]
                orig_state_dict[f'{prefix}.k_proj.weight'] = val[dim : dim * 2, :]
                orig_state_dict[f'{prefix}.v_proj.weight'] = val[-dim:, :]
            else:
                orig_state_dict[f'{prefix}.q_proj.bias'] = val[:dim]
                orig_state_dict[f'{prefix}.k_proj.bias'] = val[dim : dim * 2]
                orig_state_dict[f'{prefix}.v_proj.bias'] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split('.')
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            prefix = f'text_model.encoder.layers.{layer_num}.self_attn'
            if "weight" in key:
                orig_state_dict[f'{prefix}.q_proj.weight'] = val[:dim, :]
                orig_state_dict[f'{prefix}.k_proj.weight'] = val[dim : dim * 2, :]
                orig_state_dict[f'{prefix}.v_proj.weight'] = val[-dim:, :]
            else:
                orig_state_dict[f'{prefix}.q_proj.bias'] = val[:dim]
                orig_state_dict[f'{prefix}.k_proj.bias'] = val[dim : dim * 2]
                orig_state_dict[f'{prefix}.v_proj.bias'] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val
    return orig_state_dict
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32')
    image = prepare_img()
    inputs = processor(text=['a photo of a cat', 'a photo of a dog'], images=image, padding=True, return_tensors='pt')

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print('Successfully saved processor and model to', pytorch_dump_folder_path)

    if push_to_hub:
        print('Pushing to the hub...')
        processor.push_to_hub(model_name, organization='nielsr')
        model.push_to_hub(model_name, organization='nielsr')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
    )
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
    parser.add_argument(
        '--model_name',
        default='groupvit-gcc-yfcc',
        type=str,
        help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
    )
    parser.add_argument(
        '--push_to_hub',
        action='store_true',
        help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
    )

    args = parser.parse_args()
    convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub) | 210 | 0 |
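# Programmatic usage sketch of the converter above. The checkpoint path below is
# purely illustrative (conversion needs a real GroupViT .pth file on disk), so
# the call stays commented out:
# convert_groupvit_checkpoint(
#     checkpoint_path="/path/to/group_vit_gcc_yfcc.pth",  # hypothetical local file
#     pytorch_dump_folder_path="./groupvit-gcc-yfcc",
#     model_name="groupvit-gcc-yfcc",
#     push_to_hub=False,
# )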
'''simple docstring'''
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 31 |
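# Worked example for the minimax above: with scores [90, 23, 6, 33, 21, 65, 123, 34423]
# the game tree has height log2(8) = 3. The depth-2 maxima over leaf pairs are
# (90, 33, 65, 34423), the depth-1 minima are (33, 65), and the root maximizer
# therefore picks 65:
# assert minimax(0, 0, True, [90, 23, 6, 33, 21, 65, 123, 34423], 3) == 65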
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'google/reformer-crime-and-punishment': (
            'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/reformer-crime-and-punishment': 524288,
}
class ReformerTokenizer(PreTrainedTokenizer):
    """
    Construct a Reformer tokenizer, based on SentencePiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,) | 232 | 0 |
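# Usage sketch for the tokenizer above; the checkpoint name comes from the
# PRETRAINED_VOCAB_FILES_MAP in this file (from_pretrained downloads the
# SentencePiece vocab on first use, and sentencepiece must be installed):
from transformers import ReformerTokenizer

tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
ids = tok.encode("Crime and Punishment")
print(tok.convert_ids_to_tokens(ids))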
'''simple docstring'''
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums, max_sum, index + 1, [*path, nums[index]], result, remaining_nums_sum - nums[index]
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result) | 222 |
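# Expected behavior for the sample above: the subsets of [3, 34, 4, 12, 5, 2]
# that sum to 9 are [3, 4, 2] and [4, 5], so the script prints:
#   [3, 4, 2] [4, 5]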
'''simple docstring'''
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = 'src/diffusers'

# Matches is_xxx_available()
_re_backend = re.compile(r'is\_([a-z_]*)_available\(\)')
# Matches from xxx import bla
_re_single_line_import = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')

DUMMY_CONSTANT = '\n{0} = None\n'

DUMMY_CLASS = '\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n'

DUMMY_FUNCTION = '\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n'
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None
    return "_and_".join(backends)


def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, '__init__.py'), 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith('else:'):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(', '))
                elif line.startswith(' ' * 8):
                    objects.append(line[8:-2])
                line_index += 1
            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects
def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)


def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = '[' + ', '.join(f'"{b}"' for b in backend.split('_and_')) + ']'
        dummy_file = '# This file is autogenerated by the command `make fix-copies`, do not edit.\n'
        dummy_file += 'from ..utils import DummyObject, requires_backends\n\n'
        dummy_file += '\n'.join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files
def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {'torch': 'pt'}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, 'utils')
    dummy_file_paths = {
        backend: os.path.join(path, f'dummy_{short_names.get(backend, backend)}_objects.py')
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, 'r', encoding='utf-8', newline='\n') as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ''

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f'Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main '
                    '__init__ has new objects.'
                )
                with open(dummy_file_paths[backend], 'w', encoding='utf-8', newline='\n') as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    'The main __init__ has objects that are not present in '
                    f'diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` '
                    'to fix this.'
                )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()

    check_dummies(args.fix_and_overwrite) | 222 | 1 |
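# Quick sanity check of the backend regex used by find_backend above:
# find_backend("if not is_torch_available():") == "torch"
# find_backend("if is_torch_available() and is_transformers_available():") == "torch_and_transformers"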
"""simple docstring"""
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    """
    Few-shot named entity recognition model built on a BERT backbone.
    """

    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """
        Find scores of each token being the start and end token for an entity.
        """
        support_sizes = W_supports['sizes'].tolist()
        start_token_id = W_supports['start_token_id'].item()
        end_token_id = W_supports['end_token_id'].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports['input_ids'] == start_token_id
        end_token_masks = W_supports['input_ids'] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
| 106 |
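# Toy illustration of the attention-style scoring used in FSNERModel.Atten above;
# the tensor shapes here are invented for the sketch (768 matches a BERT hidden size):
import torch

cos = torch.nn.CosineSimilarity(3, 1e-08)
softmax = torch.nn.Softmax(dim=1)
q_rep = torch.randn(2, 5, 1, 768)  # hypothetical query token embeddings
s_rep = torch.randn(2, 1, 7, 768)  # hypothetical support token embeddings
scores = softmax(1 * cos(q_rep, s_rep))  # broadcast cosine similarity -> shape (2, 5, 7)
print(scores.shape)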
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)
def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


def apply_tesseract(image, lang, tesseract_config):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type='dict', config=tesseract_config)
    words, left, top, width, height = data['text'], data['left'], data['top'], data['width'], data['height']

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class LayoutLMv3ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: "PILImageResampling" = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_value: float = 1 / 255,
        do_normalize: bool = True,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'height': 224, 'width': 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: "np.ndarray",
        size: Dict[str, int],
        resample: "PILImageResampling" = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> "np.ndarray":
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size['height'], size['width'])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: "np.ndarray",
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> "np.ndarray":
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: "np.ndarray",
        mean: Union[float, Iterable[float]],
        std: Union[float, Iterable[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> "np.ndarray":
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.'
            )
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('If do_normalize is True, image_mean and image_std must be specified.')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, 'pytesseract')
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={'pixel_values': images}, tensor_type=return_tensors)

        if apply_ocr:
            data['words'] = words_batch
            data['boxes'] = boxes_batch
        return data
| 195 | 0 |
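# Usage sketch for the image processor above. OCR is disabled so pytesseract is
# not required, and the input "page" is a synthetic array rather than a real scan:
import numpy as np
from transformers import LayoutLMv3ImageProcessor

processor = LayoutLMv3ImageProcessor(apply_ocr=False)
page = (np.random.rand(600, 400, 3) * 255).astype(np.uint8)  # fake document page
batch = processor(page, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224) after resize + channel-first conversion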
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = 'gelu'
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertModel(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, 'use_cache'):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, 'encoder_seq_length', self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, 'key_length', encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, 'saved_model', '1')
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs['encoder_hidden_states']
                    output_attentions = outputs['encoder_attentions']
                else:
                    output_hidden_states = outputs['hidden_states']
                    output_attentions = outputs['attentions']

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, 'expected_num_hidden_layers', self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )

    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        decoder_seq_length = getattr(self.model_tester, 'decoder_seq_length', self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, 'encoder_seq_length', self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, 'key_length', decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, 'key_length', encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict['output_attentions'] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict['output_attentions'] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 363 |
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForPreTraining,
    is_apex_available,
    trainer_utils,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={'help': 'Whether to freeze the feature extractor layers of the model.'}
    )
    verbose_logging: Optional[bool] = field(
        default=False,
        metadata={'help': 'Whether to log verbose messages or not.'},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={'help': 'Maximum temperature for gumbel softmax.'}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={'help': 'Minimum temperature for gumbel softmax.'}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={'help': 'Decay of gumbel temperature during training.'}
    )


def configure_logger(model_args: ModelArguments, training_args: TrainingArguments):
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    dataset_name: str = field(
        default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}
    )
    train_split_name: Optional[str] = field(
        default='train',
        metadata={
            'help': "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default='validation',
        metadata={
            'help': (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default='file',
        metadata={'help': "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached preprocessed datasets or not.'}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            'help': "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={'help': 'The number of processes to use for the preprocessing.'},
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={'help': 'Filter audio files that are longer than `max_duration_in_seconds` seconds'}
    )
@dataclass
class DataCollatorForWav2Vec2Pretraining:
    model: Wav2Vec2ForPreTraining
    feature_extractor: Wav2Vec2FeatureExtractor
    padding: Union[bool, str] = 'longest'
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None

    def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, torch.Tensor]:
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors='pt',
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch['input_values'].shape[-1])
        batch_size = batch['input_values'].shape[0]

        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch['attention_mask'].sum(-1)).to(
                torch.long
            )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch['input_values'].device
            )
            # these two operations make sure that all values
            # before the output lengths indices are attended to
            # (the index assignment below is reconstructed from the upstream pretraining script)
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=batch['input_values'].device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()

        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )
        return batch
class Wav2Vec2PreTrainer(Trainer):
    """
    Subclassed Trainer that keeps track of the (decayed) gumbel softmax temperature.
    """

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs['mask_time_indices']).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )

        return loss.detach()
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split='validation',
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}",
            cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], batch["sampling_rate"] = librosa.load(
            batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate
        )
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets['train'].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data['speech']) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch['speech'], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets['train'].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = Wav2Vec2Config.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            'PreTraining is only supported for ``config.do_stable_layer_norm=True`` and'
            " ``config.feat_extract_norm='layer'``"
        )

    model = Wav2Vec2ForPreTraining(config)

    data_collator = DataCollatorForWav2Vec2Pretraining(model=model, feature_extractor=feature_extractor)

    trainer = Wav2Vec2PreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets['train'],
        eval_dataset=vectorized_datasets['validation'],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()


if __name__ == "__main__":
    main()
| 247 | 0 |
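# Toy sketch of the span-masking helper used by the collator above. The exact
# return type of _compute_mask_indices differs across transformers versions
# (numpy array vs. tensor), so treat this as illustrative:
import numpy as np
from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices

mask = _compute_mask_indices(shape=(2, 50), mask_prob=0.065, mask_length=10, min_masks=2)
print(np.asarray(mask).sum(axis=-1))  # number of masked frames per sequence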
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr: Sequence[float], low: int, high: int) -> tuple[int | None, int | None, float]:
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]

    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    left_sum, max_left = float('-inf'), -1
    right_sum, max_right = float('-inf'), -1

    summ: int | float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i

    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i

    return max_left, max_right, (left_sum + right_sum)
def __lowerCAmelCase ( a__ ) -> float:
__a = [randint(1 , a__ ) for _ in range(a__ )]
__a = time.time()
max_subarray(a__ , 0 , input_size - 1 )
__a = time.time()
return end - start
def __lowerCAmelCase ( ) -> None:
__a = [10, 100, 1000, 1_0000, 5_0000, 10_0000, 20_0000, 30_0000, 40_0000, 50_0000]
__a = [time_max_subarray(a__ ) for input_size in input_sizes]
print('''No of Inputs\t\tTime Taken''' )
for input_size, runtime in zip(a__ , a__ ):
print(a__ , '''\t\t''' , a__ )
plt.plot(a__ , a__ )
plt.xlabel('''Number of Inputs''' )
plt.ylabel('''Time taken in seconds''' )
plt.show()
if __name__ == "__main__":
from doctest import testmod
    testmod()
| 6 |
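# Quick usage sketch for the divide-and-conquer max_subarray above, on the
# classic example array: the best subarray is [4, -1, 2, 1] with sum 6.
nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
low, high, best_sum = max_subarray(nums, 0, len(nums) - 1)
print(low, high, best_sum)  # 3 6 6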
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int , samplerate: int , q_factor: float = 1 / sqrt(2 ) ) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = (1 - _cos) / 2
    b1 = 1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b0] )
    return filt
def make_highpass(frequency: int , samplerate: int , q_factor: float = 1 / sqrt(2 ) ) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = (1 + _cos) / 2
    b1 = -1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b0] )
    return filt
def make_bandpass(frequency: int , samplerate: int , q_factor: float = 1 / sqrt(2 ) ) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = _sin / 2
    b1 = 0
    b2 = -b0
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
def make_allpass(frequency: int , samplerate: int , q_factor: float = 1 / sqrt(2 ) ) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([b2, b1, b0] , [b0, b1, b2] )
    return filt
def make_peak(frequency: int , samplerate: int , gain_db: float , q_factor: float = 1 / sqrt(2 ) , ) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
def make_lowshelf(frequency: int , samplerate: int , gain_db: float , q_factor: float = 1 / sqrt(2 ) , ) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a ) * alpha
    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
def make_highshelf(frequency: int , samplerate: int , gain_db: float , q_factor: float = 1 / sqrt(2 ) , ) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a ) * alpha
    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
| 6 | 1 |
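# Usage sketch for the biquad designers above. It assumes the IIRFilter class
# exposes a per-sample `process()` method (true in TheAlgorithms' iir_filter
# module); treat the method name as an assumption if your IIRFilter differs.
flt = make_lowpass(1_000, 48_000)  # 1 kHz cutoff at a 48 kHz sample rate
print([round(flt.process(s), 4) for s in (1.0, 0.0, 0.0, 0.0)])  # head of the impulse response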
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def stock_price(symbol: str = "AAPL" ) -> str:
    url = f'''https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'''
    soup = BeautifulSoup(requests.get(url ).text , """html.parser""" )
    class_ = """My(6px) Pos(r) smartphone_Mt(6px)"""
    return soup.find("""div""" , class_=class_ ).find("""span""" ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F'Current {symbol:<4} stock price is {stock_price(symbol):>8}')
| 129 |
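# Defensive variant sketch for the scraper above: Yahoo's markup and CSS class
# names change often, so a production scraper should fail soft when the
# selector misses instead of raising AttributeError.
def stock_price_safe(symbol: str = "AAPL") -> str | None:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url, timeout=10).text, "html.parser")
    tag = soup.find("div", class_="My(6px) Pos(r) smartphone_Mt(6px)")
    return tag.find("span").text if tag else None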
'''simple docstring'''
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None , metadata=None ) -> Any:
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={"""help""": """The csv file to plot."""} , )
    plot_along_batch: bool = field(
        default=False , metadata={"""help""": """Whether to plot along batch size or sequence length. Defaults to sequence length."""} , )
    is_time: bool = field(
        default=False , metadata={"""help""": """Whether the csv file has time results or memory results. Defaults to memory results."""} , )
    no_log_scale: bool = field(
        default=False , metadata={"""help""": """Disable logarithmic scale when plotting"""} , )
    is_train: bool = field(
        default=False , metadata={
            """help""": """Whether the csv file has training results or inference results. Defaults to inference results."""
        } , )
    figure_png_file: Optional[str] = field(
        default=None , metadata={"""help""": """Filename under which the plot will be saved. If unused no plot is saved."""} , )
    short_model_names: Optional[List[str]] = list_field(
        default=None , metadata={"""help""": """List of model names that are used instead of the ones in the csv file."""} )
def can_convert_to_int(value ) -> bool:
    try:
        int(value )
        return True
    except ValueError:
        return False
def can_convert_to_float(value ) -> bool:
    try:
        float(value )
        return True
    except ValueError:
        return False
class Plot:
def __init__( self: Tuple , UpperCamelCase__: str ):
lowerCamelCase__ : int = args
lowerCamelCase__ : Optional[int] = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file , newline="""""" ) as csv_file:
lowerCamelCase__ : str = csv.DictReader(UpperCamelCase__ )
for row in reader:
lowerCamelCase__ : Optional[int] = row["""model"""]
self.result_dict[model_name]["bsz"].append(int(row["""batch_size"""] ) )
self.result_dict[model_name]["seq_len"].append(int(row["""sequence_length"""] ) )
if can_convert_to_int(row["""result"""] ):
# value is not None
lowerCamelCase__ : Tuple = int(row["""result"""] )
elif can_convert_to_float(row["""result"""] ):
# value is not None
lowerCamelCase__ : Any = float(row["""result"""] )
def lowerCamelCase_ ( self: str ):
lowerCamelCase__ , lowerCamelCase__ : Tuple = plt.subplots()
lowerCamelCase__ : Any = """Time usage""" if self.args.is_time else """Memory usage"""
lowerCamelCase__ : List[str] = title_str + """ for training""" if self.args.is_train else title_str + """ for inference"""
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale("""log""" )
ax.set_yscale("""log""" )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
lowerCamelCase__ : Any = sorted(set(self.result_dict[model_name]["""bsz"""] ) )
lowerCamelCase__ : int = sorted(set(self.result_dict[model_name]["""seq_len"""] ) )
lowerCamelCase__ : Any = self.result_dict[model_name]["""result"""]
((lowerCamelCase__) , (lowerCamelCase__)) : Dict = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
lowerCamelCase__ : Any = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
lowerCamelCase__ : int = np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=UpperCamelCase__ , )
else:
lowerCamelCase__ : List[Any] = np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
((lowerCamelCase__) , (lowerCamelCase__)) : List[str] = (
("""batch_size""", """len""") if self.args.plot_along_batch else ("""in #tokens""", """bsz""")
)
lowerCamelCase__ : int = np.asarray(UpperCamelCase__ , UpperCamelCase__ )[: len(UpperCamelCase__ )]
plt.scatter(
UpperCamelCase__ , UpperCamelCase__ , label=F'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' )
plt.plot(UpperCamelCase__ , UpperCamelCase__ , """--""" )
title_str += F''' {label_model_name} vs.'''
lowerCamelCase__ : Any = title_str[:-4]
lowerCamelCase__ : Optional[int] = """Time in s""" if self.args.is_time else """Memory in MB"""
# plot
plt.title(UpperCamelCase__ )
plt.xlabel(UpperCamelCase__ )
plt.ylabel(UpperCamelCase__ )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def main() -> None:
    parser = HfArgumentParser(PlotArguments )
    args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=args )
    plot.plot()
if __name__ == "__main__":
main()
| 129 | 1 |
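# Minimal sketch of the csv.DictReader pattern the Plot class above relies on
# (synthetic one-row CSV; the column names mirror the ones read in __init__).
import csv
from io import StringIO

sample = StringIO("model,batch_size,sequence_length,result\nbert,8,128,0.42\n")
for row in csv.DictReader(sample):
    print(row["model"], int(row["batch_size"]), float(row["result"]))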
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
lowerCamelCase_ = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE( A ):
SCREAMING_SNAKE_CASE_ : int = ['''input_values''', '''padding_mask''']
    def __init__( self , feature_size = 1 , sampling_rate = 2_40_00 , padding_value = 0.0 , chunk_length_s = None , overlap = None , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs )
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
@property
def _UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def _UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 ,int((1.0 - self.overlap) * self.chunk_length ) )
    def __call__( self , raw_audio , padding = None , truncation = False , max_length = None , return_tensors = None , sampling_rate = None , ) -> BatchFeature:
        """simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
f''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
if padding and truncation:
raise ValueError('''Both padding and truncation were set. Make sure you only set one.''' )
elif padding is None:
# by default let's pad the inputs
__SCREAMING_SNAKE_CASE :List[str] = True
__SCREAMING_SNAKE_CASE :str = bool(
isinstance(SCREAMING_SNAKE_CASE__ ,(list, tuple) ) and (isinstance(raw_audio[0] ,(np.ndarray, tuple, list) )) )
if is_batched:
__SCREAMING_SNAKE_CASE :List[Any] = [np.asarray(SCREAMING_SNAKE_CASE__ ,dtype=np.floataa ).T for audio in raw_audio]
elif not is_batched and not isinstance(SCREAMING_SNAKE_CASE__ ,np.ndarray ):
__SCREAMING_SNAKE_CASE :str = np.asarray(SCREAMING_SNAKE_CASE__ ,dtype=np.floataa )
elif isinstance(SCREAMING_SNAKE_CASE__ ,np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ):
__SCREAMING_SNAKE_CASE :Union[str, Any] = raw_audio.astype(np.floataa )
# always return batch
if not is_batched:
__SCREAMING_SNAKE_CASE :Tuple = [np.asarray(SCREAMING_SNAKE_CASE__ ).T]
# verify inputs are valid
for idx, example in enumerate(SCREAMING_SNAKE_CASE__ ):
if example.ndim > 2:
raise ValueError(f'''Expected input shape (channels, length) but got shape {example.shape}''' )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(f'''Expected mono audio but example has {example.shape[-1]} channels''' )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(f'''Expected stereo audio but example has {example.shape[-1]} channels''' )
__SCREAMING_SNAKE_CASE :Tuple = None
__SCREAMING_SNAKE_CASE :List[Any] = BatchFeature({'''input_values''': raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
__SCREAMING_SNAKE_CASE :Dict = min(array.shape[0] for array in raw_audio )
__SCREAMING_SNAKE_CASE :str = int(np.floor(max_length / self.chunk_stride ) )
__SCREAMING_SNAKE_CASE :List[str] = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
__SCREAMING_SNAKE_CASE :List[Any] = max(array.shape[0] for array in raw_audio )
__SCREAMING_SNAKE_CASE :Tuple = int(np.ceil(max_length / self.chunk_stride ) )
__SCREAMING_SNAKE_CASE :Any = (nb_step - 1) * self.chunk_stride + self.chunk_length
__SCREAMING_SNAKE_CASE :Dict = '''max_length'''
else:
__SCREAMING_SNAKE_CASE :Optional[Any] = input_values
# normal padding on batch
if padded_inputs is None:
__SCREAMING_SNAKE_CASE :Optional[Any] = self.pad(
SCREAMING_SNAKE_CASE__ ,max_length=SCREAMING_SNAKE_CASE__ ,truncation=SCREAMING_SNAKE_CASE__ ,padding=SCREAMING_SNAKE_CASE__ ,return_attention_mask=SCREAMING_SNAKE_CASE__ ,)
if padding:
__SCREAMING_SNAKE_CASE :Optional[int] = padded_inputs.pop('''attention_mask''' )
__SCREAMING_SNAKE_CASE :Union[str, Any] = []
for example in padded_inputs.pop('''input_values''' ):
if self.feature_size == 1:
__SCREAMING_SNAKE_CASE :int = example[..., None]
input_values.append(example.T )
__SCREAMING_SNAKE_CASE :Optional[Any] = input_values
if return_tensors is not None:
__SCREAMING_SNAKE_CASE :Tuple = padded_inputs.convert_to_tensors(SCREAMING_SNAKE_CASE__ )
        return padded_inputs
| 191 |
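# Sketch of the chunk/stride arithmetic encoded by the two properties above:
# a chunk spans chunk_length_s seconds, consecutive chunks overlap by the
# fractional `overlap`, so the hop (stride) is (1 - overlap) * chunk_length.
sampling_rate, chunk_length_s, overlap = 24_000, 1.0, 0.25
chunk_length = int(chunk_length_s * sampling_rate)
chunk_stride = max(1, int((1.0 - overlap) * chunk_length))
print(chunk_length, chunk_stride)  # 24000 18000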
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCamelCase ( a_ : Union[str, Any] , a_ : Tuple , a_ : str=None ) -> Union[str, Any]:
# set parameter of one layer
assert torch_layer.weight.shape == weight.shape, f'''{torch_layer} layer.weight does not match'''
__SCREAMING_SNAKE_CASE :Dict = nn.Parameter(a_ )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, f'''{torch_layer} layer.bias does not match'''
__SCREAMING_SNAKE_CASE :Optional[int] = nn.Parameter(a_ )
def __lowerCamelCase ( a_ : Dict , a_ : str , a_ : Optional[int] ) -> Any:
# set torch weights for 1-to-1 comparison
__SCREAMING_SNAKE_CASE :List[Any] = np.asarray(weights[0] )
__SCREAMING_SNAKE_CASE :Optional[int] = np.asarray(weights[1] )
__SCREAMING_SNAKE_CASE :Optional[int] = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(a_ ).transpose(1 , 2 ).contiguous().view(-1 , a_ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(a_ ).transpose(1 , 2 ).contiguous().view(-1 , a_ ) , )
set_param(
torch_layer.output.dense , torch.tensor(a_ ).view(-1 , a_ ).contiguous().transpose(0 , 1 ) , )
def __lowerCamelCase ( a_ : List[Any] , a_ : Dict , a_ : List[str] ) -> Union[str, Any]:
# set torch weights for 1-to-1 comparison
__SCREAMING_SNAKE_CASE :Union[str, Any] = np.asarray(weights[0] )
__SCREAMING_SNAKE_CASE :Union[str, Any] = np.asarray(weights[1] )
__SCREAMING_SNAKE_CASE :Any = np.asarray(weights[2] )
__SCREAMING_SNAKE_CASE :Dict = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(a_ ).transpose(1 , 2 ).contiguous().view(-1 , a_ ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(a_ ).transpose(1 , 2 ).contiguous().view(-1 , a_ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(a_ ).transpose(1 , 2 ).contiguous().view(-1 , a_ ) , )
set_param(
torch_layer.output.dense , torch.tensor(a_ ).view(-1 , a_ ).contiguous().transpose(0 , 1 ) , )
def __lowerCamelCase ( a_ : Any , a_ : List[str] , a_ : Optional[int] ) -> Union[str, Any]:
# layernorm 1
__SCREAMING_SNAKE_CASE :Any = weights[0][0][0]
__SCREAMING_SNAKE_CASE :Union[str, Any] = np.asarray(layer_norm_a[0] )
__SCREAMING_SNAKE_CASE :Union[str, Any] = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(a_ ) , torch.tensor(a_ ) , )
# lsh weights + output
__SCREAMING_SNAKE_CASE :List[Any] = weights[0][1]
if len(a_ ) < 4:
set_layer_weights_in_torch_lsh(a_ , torch_block.attention , a_ )
else:
set_layer_weights_in_torch_local(a_ , torch_block.attention , a_ )
# intermediate weighs
__SCREAMING_SNAKE_CASE :List[Any] = weights[2][0][1][2]
# Chunked Feed Forward
if len(a_ ) == 4:
__SCREAMING_SNAKE_CASE :List[str] = intermediate_weights[2]
# layernorm 2
__SCREAMING_SNAKE_CASE :Tuple = np.asarray(intermediate_weights[0][0] )
__SCREAMING_SNAKE_CASE :Union[str, Any] = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(a_ ) , torch.tensor(a_ ) , )
# intermediate dense
__SCREAMING_SNAKE_CASE :int = np.asarray(intermediate_weights[1][0] )
__SCREAMING_SNAKE_CASE :int = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(a_ ).transpose(0 , 1 ).contiguous() , torch.tensor(a_ ) , )
# intermediate out
__SCREAMING_SNAKE_CASE :str = np.asarray(intermediate_weights[4][0] )
__SCREAMING_SNAKE_CASE :str = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(a_ ).transpose(0 , 1 ).contiguous() , torch.tensor(a_ ) , )
def __lowerCamelCase ( a_ : List[str] , a_ : str , a_ : List[Any] ) -> Optional[Any]:
# reformer model
__SCREAMING_SNAKE_CASE :Dict = torch_model.reformer
# word embeds
__SCREAMING_SNAKE_CASE :List[Any] = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(a_ ) , )
if isinstance(weights[3] , a_ ):
__SCREAMING_SNAKE_CASE :List[Any] = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
__SCREAMING_SNAKE_CASE :List[str] = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), f'''{position_embeddings[emb_idx]} emb does not match'''
__SCREAMING_SNAKE_CASE :str = nn.Parameter(torch.tensor(a_ ) )
__SCREAMING_SNAKE_CASE :Optional[int] = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
a_ ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
__SCREAMING_SNAKE_CASE :Dict = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(a_ , a_ , a_ )
# output layer norm
__SCREAMING_SNAKE_CASE :Optional[int] = np.asarray(weights[7][0] )
__SCREAMING_SNAKE_CASE :List[Any] = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(a_ ) , torch.tensor(a_ ) , )
# output embeddings
__SCREAMING_SNAKE_CASE :Optional[int] = np.asarray(weights[9][0] )
__SCREAMING_SNAKE_CASE :str = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(a_ ).transpose(0 , 1 ).contiguous() , torch.tensor(a_ ) , )
def __lowerCamelCase ( a_ : Any , a_ : Dict , a_ : Dict ) -> Tuple:
# Initialise PyTorch model
__SCREAMING_SNAKE_CASE :List[str] = ReformerConfig.from_json_file(a_ )
print(f'''Building PyTorch model from configuration: {config}''' )
__SCREAMING_SNAKE_CASE :List[Any] = ReformerModelWithLMHead(a_ )
with open(a_ , '''rb''' ) as f:
__SCREAMING_SNAKE_CASE :Any = pickle.load(a_ )['''weights''']
set_model_weights_in_torch(a_ , a_ , config.hidden_size )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , a_ )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained Reformer model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowerCamelCase_ = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 191 | 1 |
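# Minimal sketch of the shape-checked parameter copy the converter above
# applies layer by layer (standalone; mirrors the contract of its set_param).
import torch
from torch import nn

def copy_weight(layer: nn.Linear, weight: torch.Tensor) -> None:
    assert layer.weight.shape == weight.shape, "layer.weight does not match"
    layer.weight = nn.Parameter(weight)

linear = nn.Linear(4, 4, bias=False)
copy_weight(linear, torch.zeros(4, 4))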
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class _snake_case ( A__ ):
def __init__( self , a , a=13 , a=7 , a=True , a=True , a=False , a=True , a=99 , a=32 , a=5 , a=4 , a=64 , a="gelu" , a=0.1 , a=0.1 , a=512 , a=16 , a=2 , a=0.02 , a=3 , a=4 , a=None , a=2 , a=2 , a=2 , a=2 , a=4 , a=1 , ) -> Tuple:
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = scope
SCREAMING_SNAKE_CASE = q_groups
SCREAMING_SNAKE_CASE = k_groups
SCREAMING_SNAKE_CASE = v_groups
SCREAMING_SNAKE_CASE = post_attention_groups
SCREAMING_SNAKE_CASE = intermediate_groups
SCREAMING_SNAKE_CASE = output_groups
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length])
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size)
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices)
SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a) -> Tuple:
SCREAMING_SNAKE_CASE = SqueezeBertModel(config=a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a , a)
SCREAMING_SNAKE_CASE = model(a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a) -> Optional[int]:
SCREAMING_SNAKE_CASE = SqueezeBertForMaskedLM(config=a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a , attention_mask=a , labels=a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a) -> Tuple:
SCREAMING_SNAKE_CASE = SqueezeBertForQuestionAnswering(config=a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(
a , attention_mask=a , start_positions=a , end_positions=a)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a) -> List[str]:
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = SqueezeBertForSequenceClassification(a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a , attention_mask=a , labels=a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a) -> Optional[int]:
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = SqueezeBertForTokenClassification(config=a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a , attention_mask=a , labels=a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a) -> Dict:
SCREAMING_SNAKE_CASE = self.num_choices
SCREAMING_SNAKE_CASE = SqueezeBertForMultipleChoice(config=a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
SCREAMING_SNAKE_CASE = model(
a , attention_mask=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = config_and_inputs
SCREAMING_SNAKE_CASE = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _snake_case ( A__ , A__ , unittest.TestCase ):
_lowercase : Dict = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
_lowercase : List[str] = (
{
'''feature-extraction''': SqueezeBertModel,
'''fill-mask''': SqueezeBertForMaskedLM,
'''question-answering''': SqueezeBertForQuestionAnswering,
'''text-classification''': SqueezeBertForSequenceClassification,
'''token-classification''': SqueezeBertForTokenClassification,
'''zero-shot''': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowercase : Any = False
_lowercase : Dict = True
_lowercase : Dict = False
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = SqueezeBertModelTester(self)
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=a , dim=37)
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*a)
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*a)
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*a)
def SCREAMING_SNAKE_CASE__ ( self) -> int:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*a)
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*a)
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*a)
@slow
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = SqueezeBertModel.from_pretrained(a)
self.assertIsNotNone(a)
@require_sentencepiece
@require_tokenizers
@require_torch
class _snake_case ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
SCREAMING_SNAKE_CASE = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli')
SCREAMING_SNAKE_CASE = torch.tensor([[1, 2_9414, 232, 328, 740, 1140, 1_2695, 69, 13, 1588, 2]])
SCREAMING_SNAKE_CASE = model(a)[0]
SCREAMING_SNAKE_CASE = torch.Size((1, 3))
self.assertEqual(output.shape , a)
SCREAMING_SNAKE_CASE = torch.tensor([[0.64_01, -0.03_49, -0.60_41]])
self.assertTrue(torch.allclose(a , a , atol=1E-4))
| 361 |
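# Sketch of the random-input helpers (ids_tensor / random_attention_mask) the
# model tester above uses to fabricate batches; plain torch calls are a close
# analogue (the real helpers also guarantee at least one attended token).
import torch

batch_size, seq_length, vocab_size = 13, 7, 99
input_ids = torch.randint(0, vocab_size, (batch_size, seq_length))
attention_mask = (torch.rand(batch_size, seq_length) > 0.5).long()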
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _snake_case ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE__ ( self) -> int:
SCREAMING_SNAKE_CASE = TFCamembertModel.from_pretrained('jplu/tf-camembert-base')
SCREAMING_SNAKE_CASE = tf.convert_to_tensor(
        [[5, 121, 11, 660, 16, 730, 2_5543, 110, 83, 6]] , dtype=tf.intaa , )  # J'aime le camembert !
SCREAMING_SNAKE_CASE = model(a)['last_hidden_state']
SCREAMING_SNAKE_CASE = tf.TensorShape((1, 10, 768))
self.assertEqual(output.shape , a)
# compare the actual values for a slice.
SCREAMING_SNAKE_CASE = tf.convert_to_tensor(
[[[-0.02_54, 0.02_35, 0.10_27], [0.06_06, -0.18_11, -0.04_18], [-0.15_61, -0.11_27, 0.26_87]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4))
| 327 | 0 |
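# Sketch of the tolerance-based comparison pattern used in the slow test
# above: model outputs are checked against a recorded slice with atol=1e-4.
import numpy as np

recorded = np.array([-0.0254, 0.0235, 0.1027])
fresh = recorded + 5e-5  # pretend this came from a new forward pass
assert np.allclose(fresh, recorded, atol=1e-4)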
"""simple docstring"""
import requests
from bs4 import BeautifulSoup
def stock_price(symbol: str = "AAPL" ) -> str:
    '''simple docstring'''
    url = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
    soup = BeautifulSoup(requests.get(url ).text , """html.parser""" )
    class_ = '''My(6px) Pos(r) smartphone_Mt(6px)'''
    return soup.find("""div""" , class_=class_ ).find("""span""" ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
| 84 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class lowercase__ :
@staticmethod
def __A ( *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_torch
class lowercase__ ( unittest.TestCase):
UpperCamelCase_ = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def __A ( self : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = pipeline(
'''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
SCREAMING_SNAKE_CASE : List[str] = [
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
]
return object_detector, examples
def __A ( self : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = object_detector(examples[0] , threshold=0.0 )
SCREAMING_SNAKE_CASE : Tuple = len(UpperCamelCase__ )
self.assertGreater(UpperCamelCase__ , 0 )
self.assertEqual(
UpperCamelCase__ , [
{
'''score''': ANY(UpperCamelCase__ ),
'''label''': ANY(UpperCamelCase__ ),
'''box''': {'''xmin''': ANY(UpperCamelCase__ ), '''ymin''': ANY(UpperCamelCase__ ), '''xmax''': ANY(UpperCamelCase__ ), '''ymax''': ANY(UpperCamelCase__ )},
}
for i in range(UpperCamelCase__ )
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def __A ( self : Optional[Any] ):
'''simple docstring'''
pass
@require_torch
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = pipeline(
'''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
SCREAMING_SNAKE_CASE : str = object_detector(
'''./tests/fixtures/tests_samples/COCO/000000039769.png''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=0.64 , )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{'''score''': 0.7235, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7218, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7184, '''label''': '''couch''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.6748, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6656, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6614, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6456, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
{'''score''': 0.642, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}},
{'''score''': 0.6419, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
] , )
SCREAMING_SNAKE_CASE : str = object_detector(
[
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
[
{'''score''': 0.7235, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7218, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7184, '''label''': '''couch''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.6748, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6656, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6614, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6456, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
{'''score''': 0.642, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}},
{'''score''': 0.6419, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
]
] , )
@require_torch
@slow
def __A ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = pipeline('''zero-shot-object-detection''' )
SCREAMING_SNAKE_CASE : Optional[int] = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.1474, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.1208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
] , )
SCREAMING_SNAKE_CASE : int = object_detector(
[
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
] , )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
[
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.1474, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.1208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
],
[
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.1474, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.1208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
],
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def __A ( self : str ):
'''simple docstring'''
pass
@require_torch
@slow
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = 0.2
SCREAMING_SNAKE_CASE : Optional[int] = pipeline('''zero-shot-object-detection''' )
SCREAMING_SNAKE_CASE : Dict = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=UpperCamelCase__ , )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
] , )
@require_torch
@slow
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = 2
SCREAMING_SNAKE_CASE : Optional[Any] = pipeline('''zero-shot-object-detection''' )
SCREAMING_SNAKE_CASE : List[str] = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , top_k=UpperCamelCase__ , )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
] , )
| 182 | 0 |
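# Usage sketch for the zero-shot object-detection pipeline exercised above
# (downloads a checkpoint on first run; the output schema matches the dicts
# asserted in the tests: score, label, and an xmin/ymin/xmax/ymax box).
from transformers import pipeline

detector = pipeline("zero-shot-object-detection")
for pred in detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote", "couch"],
    threshold=0.2,
):
    print(pred["label"], round(pred["score"], 3), pred["box"])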
"""simple docstring"""
from __future__ import annotations
def simple_interest(principal: float , daily_interest_rate: float , days_between_payments: float ) -> float:
    '''simple docstring'''
    if days_between_payments <= 0:
        raise ValueError("""days_between_payments must be > 0""" )
    if daily_interest_rate < 0:
        raise ValueError("""daily_interest_rate must be >= 0""" )
    if principal <= 0:
        raise ValueError("""principal must be > 0""" )
    return principal * daily_interest_rate * days_between_payments
def compound_interest(principal: float , nominal_annual_interest_rate_percentage: float , number_of_compounding_periods: float , ) -> float:
    '''simple docstring'''
    if number_of_compounding_periods <= 0:
        raise ValueError("""number_of_compounding_periods must be > 0""" )
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("""nominal_annual_interest_rate_percentage must be >= 0""" )
    if principal <= 0:
        raise ValueError("""principal must be > 0""" )
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )
def apr_interest(principal: float , nominal_annual_percentage_rate: float , number_of_years: float , ) -> float:
    '''simple docstring'''
    if number_of_years <= 0:
        raise ValueError("""number_of_years must be > 0""" )
    if nominal_annual_percentage_rate < 0:
        raise ValueError("""nominal_annual_percentage_rate must be >= 0""" )
    if principal <= 0:
        raise ValueError("""principal must be > 0""" )
    return compound_interest(
        principal , nominal_annual_percentage_rate / 365 , number_of_years * 365 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 248 |
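# Worked example for the helpers above (hypothetical figures): 10_000 of
# principal at 0.05% simple daily interest over 30 days yields 150.0, and the
# APR helper compounds the same principal daily over the stated years.
print(simple_interest(10_000, 0.0005, 30))  # 150.0
print(apr_interest(10_000, 0.05, 1))        # ~512.67 interest after one year of daily compounding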
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=a_ )
class UpperCamelCase_ ( a_ ):
_A : str = field(default='image-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
_A : ClassVar[Features] = Features({'image': Image()} )
_A : ClassVar[Features] = Features({'labels': ClassLabel} )
_A : str = "image"
_A : str = "labels"
def UpperCamelCase_ ( self , snake_case__ ) -> List[str]:
"""simple docstring"""
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , snake_case__ ):
raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
UpperCAmelCase = copy.deepcopy(self )
UpperCAmelCase = self.label_schema.copy()
UpperCAmelCase = features[self.label_column]
UpperCAmelCase = label_schema
return task_template
@property
def UpperCamelCase_ ( self ) -> Dict[str, str]:
"""simple docstring"""
return {
self.image_column: "image",
self.label_column: "labels",
}
| 248 | 1 |
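# Sketch of the feature schema the task template above aligns: an image
# column plus a ClassLabel column (the label names here are hypothetical).
from datasets import ClassLabel, Features, Image

features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
print(features["labels"].num_classes)  # 2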
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class A ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self : Any )-> List[Any]:
'''simple docstring'''
A__ = 1_0
def snake_case__ ( self : List[Any] )-> str:
'''simple docstring'''
A__ = [1, 2, 3, 4]
A__ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(lowercase_,self.block_size,0 ),lowercase_ )
def snake_case__ ( self : Dict )-> List[Any]:
'''simple docstring'''
A__ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
A__ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
self.assertEqual(truncate_or_pad(lowercase_,self.block_size,0 ),lowercase_ )
def snake_case__ ( self : Any )-> Tuple:
'''simple docstring'''
A__ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0, 1_1, 1_2, 1_3]
A__ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
self.assertEqual(truncate_or_pad(lowercase_,self.block_size,0 ),lowercase_ )
def snake_case__ ( self : str )-> Union[str, Any]:
'''simple docstring'''
A__ = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
A__ , A__ = process_story(lowercase_ )
self.assertEqual(lowercase_,[] )
def snake_case__ ( self : Any )-> Optional[int]:
'''simple docstring'''
A__ = ''
A__ , A__ = process_story(lowercase_ )
self.assertEqual(lowercase_,[] )
self.assertEqual(lowercase_,[] )
def snake_case__ ( self : Optional[Any] )-> str:
'''simple docstring'''
A__ = (
'It was the year of Our Lord one thousand seven hundred and '
'seventy-five\n\nSpiritual revelations were conceded to England '
'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
)
A__ , A__ = process_story(lowercase_ )
A__ = [
'It was the year of Our Lord one thousand seven hundred and seventy-five.',
'Spiritual revelations were conceded to England at that favoured period, as at this.',
]
self.assertEqual(lowercase_,lowercase_ )
A__ = ['It was the best of times.']
self.assertEqual(lowercase_,lowercase_ )
def snake_case__ ( self : Optional[Any] )-> Any:
'''simple docstring'''
A__ = torch.tensor([1, 2, 3, 4] )
A__ = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(lowercase_,0 ).numpy(),expected.numpy() )
def snake_case__ ( self : Any )-> Any:
'''simple docstring'''
A__ = torch.tensor([1, 2, 3, 4, 2_3, 2_3, 2_3] )
A__ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(lowercase_,2_3 ).numpy(),expected.numpy() )
def snake_case__ ( self : Union[str, Any] )-> Union[str, Any]:
'''simple docstring'''
A__ = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
A__ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(lowercase_,1 ).numpy(),expected.numpy() )
def snake_case__ ( self : Any )-> str:
'''simple docstring'''
A__ = 1_0_1
A__ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_0_1, 5, 6], [1, 1_0_1, 3, 4, 1_0_1, 6]] )
A__ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
A__ = compute_token_type_ids(lowercase_,lowercase_ )
np.testing.assert_array_equal(lowercase_,lowercase_ )
| 7 |
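# Self-contained sketch of the truncate_or_pad behaviour the tests above
# assert: clip to block_size, or right-pad with the pad token id.
def truncate_or_pad_sketch(sequence, block_size, pad_token_id):
    if len(sequence) >= block_size:
        return sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))

assert truncate_or_pad_sketch([1, 2, 3, 4], 10, 0) == [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]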
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def _snake_case( SCREAMING_SNAKE_CASE__ : Any ) -> int:
'''simple docstring'''
A__ = 384
A__ = 7
if "tiny" in model_name:
A__ = 96
A__ = (2, 2, 6, 2)
A__ = (3, 6, 12, 24)
elif "small" in model_name:
A__ = 96
A__ = (2, 2, 18, 2)
A__ = (3, 6, 12, 24)
elif "base" in model_name:
A__ = 128
A__ = (2, 2, 18, 2)
A__ = (4, 8, 16, 32)
A__ = 12
A__ = 512
elif "large" in model_name:
A__ = 192
A__ = (2, 2, 18, 2)
A__ = (6, 12, 24, 48)
A__ = 12
A__ = 768
# set label information
A__ = 150
A__ = 'huggingface/label-files'
A__ = 'ade20k-id2label.json'
A__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='dataset' ) , 'r' ) )
A__ = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
A__ = {v: k for k, v in idalabel.items()}
A__ = SwinConfig(
embed_dim=SCREAMING_SNAKE_CASE__ , depths=SCREAMING_SNAKE_CASE__ , num_heads=SCREAMING_SNAKE_CASE__ , window_size=SCREAMING_SNAKE_CASE__ , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
A__ = UperNetConfig(
backbone_config=SCREAMING_SNAKE_CASE__ , auxiliary_in_channels=SCREAMING_SNAKE_CASE__ , num_labels=SCREAMING_SNAKE_CASE__ , idalabel=SCREAMING_SNAKE_CASE__ , labelaid=SCREAMING_SNAKE_CASE__ , )
return config
def _snake_case( SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Dict:
'''simple docstring'''
A__ = []
# fmt: off
# stem
rename_keys.append(('backbone.patch_embed.projection.weight', 'backbone.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.projection.bias', 'backbone.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'backbone.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'backbone.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.stages.{i}.downsample.reduction.weight', f'backbone.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.stages.{i}.downsample.norm.weight', f'backbone.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.stages.{i}.downsample.norm.bias', f'backbone.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((f'backbone.norm{i}.weight', f'backbone.hidden_states_norms.stage{i+1}.weight') )
rename_keys.append((f'backbone.norm{i}.bias', f'backbone.hidden_states_norms.stage{i+1}.bias') )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def _snake_case( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] ) -> Optional[int]:
'''simple docstring'''
A__ = dct.pop(SCREAMING_SNAKE_CASE__ )
A__ = val
def _snake_case( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> Any:
'''simple docstring'''
A__ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
A__ = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
A__ = state_dict.pop(f'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight' )
A__ = state_dict.pop(f'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
A__ = in_proj_weight[:dim, :]
A__ = in_proj_bias[: dim]
A__ = in_proj_weight[
dim : dim * 2, :
]
A__ = in_proj_bias[
dim : dim * 2
]
A__ = in_proj_weight[
-dim :, :
]
A__ = in_proj_bias[-dim :]
# fmt: on
def _snake_case( SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
A__ , A__ = x.shape
A__ = x.reshape(SCREAMING_SNAKE_CASE__ , 4 , in_channel // 4 )
A__ = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return x
def _snake_case( SCREAMING_SNAKE_CASE__ : Tuple ) -> List[str]:
'''simple docstring'''
A__ , A__ = x.shape
A__ = x.reshape(SCREAMING_SNAKE_CASE__ , in_channel // 4 , 4 )
A__ = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return x
def _snake_case( SCREAMING_SNAKE_CASE__ : Any ) -> Optional[int]:
'''simple docstring'''
A__ = x.shape[0]
A__ = x.reshape(4 , in_channel // 4 )
A__ = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(SCREAMING_SNAKE_CASE__ )
return x
def _snake_case( SCREAMING_SNAKE_CASE__ : Any ) -> List[Any]:
'''simple docstring'''
A__ = x.shape[0]
A__ = x.reshape(in_channel // 4 , 4 )
A__ = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(SCREAMING_SNAKE_CASE__ )
return x
def _snake_case( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
A__ = {
'upernet-swin-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth',
'upernet-swin-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth',
'upernet-swin-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth',
'upernet-swin-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth',
}
A__ = model_name_to_url[model_name]
A__ = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE__ , map_location='cpu' , file_name=SCREAMING_SNAKE_CASE__ )[
'state_dict'
]
for name, param in state_dict.items():
print(SCREAMING_SNAKE_CASE__ , param.shape )
A__ = get_upernet_config(SCREAMING_SNAKE_CASE__ )
A__ = UperNetForSemanticSegmentation(SCREAMING_SNAKE_CASE__ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
A__ = state_dict.pop(SCREAMING_SNAKE_CASE__ )
if "bn" in key:
A__ = key.replace('bn' , 'batch_norm' )
A__ = val
# rename keys
A__ = create_rename_keys(SCREAMING_SNAKE_CASE__ )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
read_in_q_k_v(SCREAMING_SNAKE_CASE__ , config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
A__ = reverse_correct_unfold_reduction_order(SCREAMING_SNAKE_CASE__ )
if "norm" in key:
A__ = reverse_correct_unfold_norm_order(SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
# verify on image
A__ = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
A__ = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw ).convert('RGB' )
A__ = SegformerImageProcessor()
A__ = processor(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values
with torch.no_grad():
A__ = model(SCREAMING_SNAKE_CASE__ )
A__ = outputs.logits
print(logits.shape )
print('First values of logits:' , logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
A__ = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] )
elif model_name == "upernet-swin-small":
A__ = torch.tensor(
[[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] )
elif model_name == "upernet-swin-base":
A__ = torch.tensor(
[[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] )
elif model_name == "upernet-swin-large":
A__ = torch.tensor(
[[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] )
print('Logits:' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
print(f'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if push_to_hub:
print(f'Pushing model and processor for {model_name} to hub' )
model.push_to_hub(f'openmmlab/{model_name}' )
processor.push_to_hub(f'openmmlab/{model_name}' )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-swin-tiny",
type=str,
choices=[f"""upernet-swin-{size}""" for size in ["tiny", "small", "base", "large"]],
help="Name of the Swin + UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
lowercase_ = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 7 | 1 |
'''simple docstring'''
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
UpperCAmelCase : Tuple = logging.get_logger(__name__)
def a__ ( a__ , a__ , a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(a__ ) == len(a__ ), F'{len(a__ )} != {len(a__ )}'
dest_layers.load_state_dict(layers_to_copy.state_dict() )
UpperCAmelCase : List[Any] = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
1_2: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 1_1],
4: [0, 4, 8, 1_1],
6: [0, 2, 4, 7, 9, 1_1],
9: [0, 1, 2, 4, 5, 7, 9, 1_0, 1_1],
1_2: list(range(1_2)),
},
1_6: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 1_5],
3: [0, 8, 1_5],
4: [0, 5, 1_0, 1_5],
6: [0, 3, 6, 9, 1_2, 1_5],
8: [0, 2, 4, 6, 8, 1_0, 1_2, 1_5],
9: [0, 1, 3, 5, 7, 9, 1_1, 1_3, 1_5],
1_2: [0, 1, 2, 3, 4, 5, 6, 7, 9, 1_1, 1_3, 1_5],
1_6: list(range(1_6)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
UpperCAmelCase : List[Any] = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
1_2: {1: [1_1], 2: [5, 1_1], 3: [3, 7, 1_1], 6: [1, 3, 5, 8, 1_0, 1_1]},
1_6: {1: [1_5], 4: [4, 9, 1_2, 1_5], 8: [1, 3, 5, 7, 9, 1_1, 1_3, 1_5]},
}
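# Reading the maps above, as an example: distilling a 12-layer teacher into a
# 3-layer student copies teacher layers [0, 6, 11] (first, middle, last), and
# intermediate supervision for that pairing matches the student's layers
# against teacher layers [3, 7, 11].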
def a__ ( a__ , a__ ):
"""simple docstring"""
try:
__SCREAMING_SNAKE_CASE = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
F'no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first'
F' {n_student}' )
return list(range(a__ ) )
def a__ ( a__ , a__ ):
"""simple docstring"""
if n_student > n_teacher:
raise ValueError(F'Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}' )
elif n_teacher == n_student:
return list(range(a__ ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def a__ ( a__ , a__ = "student" , a__ = None , a__ = None , a__=False , a__=None , a__=None , **a__ , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."""
assert (e is not None) or (d is not None), _msg
if isinstance(a__ , a__ ):
AutoTokenizer.from_pretrained(a__ ).save_pretrained(a__ ) # purely for convenience
__SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_pretrained(a__ ).eval()
else:
assert isinstance(a__ , a__ ), F'teacher must be a model or string, got type {type(a__ )}'
__SCREAMING_SNAKE_CASE = teacher.config.to_diff_dict()
try:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
__SCREAMING_SNAKE_CASE = teacher_e
if d is None:
__SCREAMING_SNAKE_CASE = teacher_d
init_kwargs.update({"""encoder_layers""": e, """decoder_layers""": d} )
except AttributeError: # T5
if hasattr(teacher.config , """num_encoder_layers""" ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
__SCREAMING_SNAKE_CASE = teacher_e
if d is None:
__SCREAMING_SNAKE_CASE = teacher_d
if hasattr(teacher.config , """num_encoder_layers""" ):
init_kwargs.update({"""num_encoder_layers""": e, """num_decoder_layers""": d} )
else:
init_kwargs.update({"""num_layers""": e, """num_decoder_layers""": d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(a__ )
# Copy weights
__SCREAMING_SNAKE_CASE = teacher.config_class(**a__ )
__SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_config(a__ )
# Start by copying the full teacher state dict; this will copy the first N teacher layers to the student.
__SCREAMING_SNAKE_CASE = student.load_state_dict(teacher.state_dict() , strict=a__ )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher key.
if copy_first_teacher_layers: # Our copying is done. We just log and save
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = list(range(a__ ) ), list(range(a__ ) )
logger.info(
F'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to'
F' {save_path}' )
student.save_pretrained(a__ )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep the first and last layers.
if e_layers_to_copy is None:
__SCREAMING_SNAKE_CASE = pick_layers_to_copy(a__ , a__ )
if d_layers_to_copy is None:
__SCREAMING_SNAKE_CASE = pick_layers_to_copy(a__ , a__ )
try:
if hasattr(
a__ , """prophetnet""" ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , a__ )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , a__ )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , a__ )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , a__ )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , a__ )
copy_layers(teacher.decoder.block , student.decoder.block , a__ )
logger.info(
F'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}' )
__SCREAMING_SNAKE_CASE = {
"""teacher_type""": teacher.config.model_type,
"""copied_encoder_layers""": e_layers_to_copy,
"""copied_decoder_layers""": d_layers_to_copy,
}
student.save_pretrained(a__ )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
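# Hedged usage sketch (not executed; the checkpoint name below is an assumption
# for illustration only). fire exposes the function on the CLI, equivalent to:
#   create_student_by_copying_alternating_layers(
#       "facebook/bart-large-cnn", save_path="student_3_3", e=3, d=3
#   )
# which would save a 3-encoder/3-decoder student initialised from the teacher.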
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 357 |
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
UpperCAmelCase : Any = logging.getLogger(__name__)
require_version('pytorch_lightning>=1.0.4')
UpperCAmelCase : Optional[Any] = {
'base': AutoModel,
'sequence-classification': AutoModelForSequenceClassification,
'question-answering': AutoModelForQuestionAnswering,
'pretraining': AutoModelForPreTraining,
'token-classification': AutoModelForTokenClassification,
'language-modeling': AutoModelWithLMHead,
'summarization': AutoModelForSeqaSeqLM,
'translation': AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
UpperCAmelCase : Dict = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
UpperCAmelCase : Optional[Any] = sorted(arg_to_scheduler.keys())
UpperCAmelCase : str = '{' + ', '.join(arg_to_scheduler_choices) + '}'
class lowerCAmelCase__ ( pl.LightningModule ):
"""simple docstring"""
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : argparse.Namespace , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : Dict="base" , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : List[str]=None , **__SCREAMING_SNAKE_CASE : Union[str, Any] , ) -> Any:
"""simple docstring"""
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = Path(self.hparams.output_dir )
__SCREAMING_SNAKE_CASE = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({"""num_labels""": num_labels} if num_labels is not None else {}) , cache_dir=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
else:
__SCREAMING_SNAKE_CASE = config
__SCREAMING_SNAKE_CASE = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
for p in extra_model_params:
if getattr(self.hparams , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
assert hasattr(self.config , __SCREAMING_SNAKE_CASE ), f'model config doesn\'t have a `{p}` attribute'
setattr(self.config , __SCREAMING_SNAKE_CASE , getattr(self.hparams , __SCREAMING_SNAKE_CASE ) )
if tokenizer is None:
__SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=__SCREAMING_SNAKE_CASE , )
else:
__SCREAMING_SNAKE_CASE = tokenizer
__SCREAMING_SNAKE_CASE = MODEL_MODES[mode]
if model is None:
__SCREAMING_SNAKE_CASE = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool(""".ckpt""" in self.hparams.model_name_or_path ) , config=self.config , cache_dir=__SCREAMING_SNAKE_CASE , )
else:
__SCREAMING_SNAKE_CASE = model
def UpperCAmelCase__ ( self : List[str] , *__SCREAMING_SNAKE_CASE : List[Any] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_type.from_pretrained(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : List[Any] ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = arg_to_scheduler[self.hparams.lr_scheduler]
__SCREAMING_SNAKE_CASE = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
__SCREAMING_SNAKE_CASE = {"""scheduler""": scheduler, """interval""": """step""", """frequency""": 1}
return scheduler
def UpperCAmelCase__ ( self : int ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model
__SCREAMING_SNAKE_CASE = ["""bias""", """LayerNorm.weight"""]
__SCREAMING_SNAKE_CASE = [
{
"""params""": [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
], # check these named parameters
"""weight_decay""": self.hparams.weight_decay,
},
{
"""params""": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
"""weight_decay""": 0.0,
},
]
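# Rationale: bias and LayerNorm weights are conventionally exempt from weight
# decay, hence the second parameter group above with weight_decay 0.0.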
if self.hparams.adafactor:
__SCREAMING_SNAKE_CASE = Adafactor(
__SCREAMING_SNAKE_CASE , lr=self.hparams.learning_rate , scale_parameter=__SCREAMING_SNAKE_CASE , relative_step=__SCREAMING_SNAKE_CASE )
else:
__SCREAMING_SNAKE_CASE = AdamW(
__SCREAMING_SNAKE_CASE , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
__SCREAMING_SNAKE_CASE = optimizer
__SCREAMING_SNAKE_CASE = self.get_lr_scheduler()
return [optimizer], [scheduler]
def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> int:
"""simple docstring"""
return self.validation_step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : int , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Dict:
"""simple docstring"""
return self.validation_end(__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Tuple ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
__SCREAMING_SNAKE_CASE = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
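# Worked example (illustrative numbers only): with dataset_size=10_000,
# train_batch_size=32, accumulate_grad_batches=2 and gpus=2, the effective
# batch size is 32 * 2 * 2 = 128, so one epoch is 10_000 / 128 = 78.125
# optimizer steps and max_epochs=3 gives roughly 234 total steps.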
def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : int ) -> Union[str, Any]:
"""simple docstring"""
if stage == "test":
__SCREAMING_SNAKE_CASE = len(self.test_dataloader().dataset )
else:
__SCREAMING_SNAKE_CASE = self.get_dataloader("""train""" , self.hparams.train_batch_size , shuffle=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = len(self.train_dataloader().dataset )
def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : bool = False ) -> int:
"""simple docstring"""
raise NotImplementedError("""You must implement this for your task""" )
def UpperCAmelCase__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
return self.train_loader
def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
return self.get_dataloader("""dev""" , self.hparams.eval_batch_size , shuffle=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : str ) -> Any:
"""simple docstring"""
return self.get_dataloader("""test""" , self.hparams.eval_batch_size , shuffle=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : str , __SCREAMING_SNAKE_CASE : Dict ) -> Union[str, Any]:
"""simple docstring"""
return os.path.join(
self.hparams.data_dir , """cached_{}_{}_{}""".format(
__SCREAMING_SNAKE_CASE , list(filter(__SCREAMING_SNAKE_CASE , self.hparams.model_name_or_path.split("""/""" ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : Dict[str, Any] ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.output_dir.joinpath("""best_tfmr""" )
__SCREAMING_SNAKE_CASE = self.step_count
self.model.save_pretrained(__SCREAMING_SNAKE_CASE )
self.tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE )
@staticmethod
def UpperCAmelCase__ ( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ) -> int:
"""simple docstring"""
parser.add_argument(
"""--model_name_or_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to pretrained model or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--config_name""" , default="""""" , type=__SCREAMING_SNAKE_CASE , help="""Pretrained config name or path if not the same as model_name""" )
parser.add_argument(
"""--tokenizer_name""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Pretrained tokenizer name or path if not the same as model_name""" , )
parser.add_argument(
"""--cache_dir""" , default=str(Path(__SCREAMING_SNAKE_CASE ).parent / """test_run""" / """cache""" ) , type=__SCREAMING_SNAKE_CASE , help="""Where do you want to store the pre-trained models downloaded from huggingface.co""" , )
parser.add_argument(
"""--encoder_layerdrop""" , type=__SCREAMING_SNAKE_CASE , help="""Encoder layer dropout probability (Optional). Goes into model.config""" , )
parser.add_argument(
"""--decoder_layerdrop""" , type=__SCREAMING_SNAKE_CASE , help="""Decoder layer dropout probability (Optional). Goes into model.config""" , )
parser.add_argument(
"""--dropout""" , type=__SCREAMING_SNAKE_CASE , help="""Dropout probability (Optional). Goes into model.config""" , )
parser.add_argument(
"""--attention_dropout""" , type=__SCREAMING_SNAKE_CASE , help="""Attention dropout probability (Optional). Goes into model.config""" , )
parser.add_argument("""--learning_rate""" , default=5E-5 , type=__SCREAMING_SNAKE_CASE , help="""The initial learning rate for Adam.""" )
parser.add_argument(
"""--lr_scheduler""" , default="""linear""" , choices=__SCREAMING_SNAKE_CASE , metavar=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Learning rate scheduler""" , )
parser.add_argument("""--weight_decay""" , default=0.0 , type=__SCREAMING_SNAKE_CASE , help="""Weight decay if we apply some.""" )
parser.add_argument("""--adam_epsilon""" , default=1E-8 , type=__SCREAMING_SNAKE_CASE , help="""Epsilon for Adam optimizer.""" )
parser.add_argument("""--warmup_steps""" , default=0 , type=__SCREAMING_SNAKE_CASE , help="""Linear warmup over warmup_steps.""" )
parser.add_argument("""--num_workers""" , default=4 , type=__SCREAMING_SNAKE_CASE , help="""kwarg passed to DataLoader""" )
parser.add_argument("""--num_train_epochs""" , dest="""max_epochs""" , default=3 , type=__SCREAMING_SNAKE_CASE )
parser.add_argument("""--train_batch_size""" , default=32 , type=__SCREAMING_SNAKE_CASE )
parser.add_argument("""--eval_batch_size""" , default=32 , type=__SCREAMING_SNAKE_CASE )
parser.add_argument("""--adafactor""" , action="""store_true""" )
class lowerCAmelCase__ ( pl.Callback ):
"""simple docstring"""
def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
if (
trainer.is_global_zero and trainer.global_rank == 0
): # we initialize the retriever only on the master worker with RAY. In new pytorch-lightning, accelerators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class lowerCAmelCase__ ( pl.Callback ):
"""simple docstring"""
def UpperCAmelCase__ ( self : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Any:
"""simple docstring"""
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(__SCREAMING_SNAKE_CASE )
class lowerCAmelCase__ ( pl.Callback ):
"""simple docstring"""
def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : str ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = trainer.lr_schedulers[0]["""scheduler"""]
__SCREAMING_SNAKE_CASE = {f'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : pl.Trainer , __SCREAMING_SNAKE_CASE : pl.LightningModule ) -> List[Any]:
"""simple docstring"""
rank_zero_info("""***** Validation results *****""" )
__SCREAMING_SNAKE_CASE = trainer.callback_metrics
# Log results
for key in sorted(__SCREAMING_SNAKE_CASE ):
if key not in ["log", "progress_bar"]:
rank_zero_info("""{} = {}\n""".format(__SCREAMING_SNAKE_CASE , str(metrics[key] ) ) )
def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : pl.Trainer , __SCREAMING_SNAKE_CASE : pl.LightningModule ) -> str:
"""simple docstring"""
rank_zero_info("""***** Test results *****""" )
__SCREAMING_SNAKE_CASE = trainer.callback_metrics
# Log and save results to file
__SCREAMING_SNAKE_CASE = os.path.join(pl_module.hparams.output_dir , """test_results.txt""" )
with open(__SCREAMING_SNAKE_CASE , """w""" ) as writer:
for key in sorted(__SCREAMING_SNAKE_CASE ):
if key not in ["log", "progress_bar"]:
rank_zero_info("""{} = {}\n""".format(__SCREAMING_SNAKE_CASE , str(metrics[key] ) ) )
writer.write("""{} = {}\n""".format(__SCREAMING_SNAKE_CASE , str(metrics[key] ) ) )
def a__ ( a__ , a__ ):
"""simple docstring"""
parser.add_argument(
"""--output_dir""" , default=str(Path(a__ ).parent / """test_run""" / """model_checkpoints""" ) , type=a__ , help="""The output directory where the model predictions and checkpoints will be written.""" , )
parser.add_argument(
"""--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , )
parser.add_argument(
"""--fp16_opt_level""" , type=a__ , default="""O2""" , help=(
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."""
"""See details at https://nvidia.github.io/apex/amp.html"""
) , )
parser.add_argument("""--n_tpu_cores""" , dest="""tpu_cores""" , type=a__ )
parser.add_argument("""--max_grad_norm""" , dest="""gradient_clip_val""" , default=1.0 , type=a__ , help="""Max gradient norm""" )
parser.add_argument("""--do_train""" , action="""store_true""" , help="""Whether to run training.""" )
parser.add_argument("""--do_predict""" , action="""store_true""" , help="""Whether to run predictions on the test set.""" )
parser.add_argument(
"""--gradient_accumulation_steps""" , dest="""accumulate_grad_batches""" , type=a__ , default=1 , help="""Number of updates steps to accumulate before performing a backward/update pass.""" , )
parser.add_argument("""--seed""" , type=a__ , default=42 , help="""random seed for initialization""" )
parser.add_argument(
"""--data_dir""" , default=str(Path(a__ ).parent / """test_run""" / """dummy-train-data""" ) , type=a__ , help="""The input data dir. Should contain the training files for the CoNLL-2003 NER task.""" , )
def a__ ( a__ , a__ , a__=None , a__=True , a__=[] , a__=None , a__=None , **a__ , ):
"""simple docstring"""
pl.seed_everything(args.seed )
# init model
__SCREAMING_SNAKE_CASE = Path(model.hparams.output_dir )
odir.mkdir(exist_ok=a__ )
# add custom checkpoints
if checkpoint_callback is None:
__SCREAMING_SNAKE_CASE = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix="""checkpoint""" , monitor="""val_loss""" , mode="""min""" , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(a__ )
if logging_callback is None:
__SCREAMING_SNAKE_CASE = LoggingCallback()
__SCREAMING_SNAKE_CASE = {}
if args.fpaa:
__SCREAMING_SNAKE_CASE = 16
if args.gpus > 1:
__SCREAMING_SNAKE_CASE = """auto"""
__SCREAMING_SNAKE_CASE = """ddp"""
__SCREAMING_SNAKE_CASE = args.accumulate_grad_batches
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = """auto"""
__SCREAMING_SNAKE_CASE = pl.Trainer.from_argparse_args(
a__ , weights_summary=a__ , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=a__ , val_check_interval=1 , num_sanity_val_steps=2 , **a__ , )
if args.do_train:
trainer.fit(a__ )
else:
print("""RAG modeling tests with new set functions successfuly executed!""" )
return trainer
| 331 | 0 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
a__ : int = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase__ :
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
@dataclass
class UpperCAmelCase__ :
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = '''train'''
__SCREAMING_SNAKE_CASE = '''dev'''
__SCREAMING_SNAKE_CASE = '''test'''
class UpperCAmelCase__ :
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> List[InputExample]:
raise NotImplementedError
@staticmethod
def __lowerCamelCase ( lowercase ) -> List[str]:
raise NotImplementedError
@staticmethod
def __lowerCamelCase ( lowercase , lowercase , lowercase , lowercase , lowercase=False , lowercase="[CLS]" , lowercase=1 , lowercase="[SEP]" , lowercase=False , lowercase=False , lowercase=0 , lowercase=0 , lowercase=-1_0_0 , lowercase=0 , lowercase=True , ) -> List[InputFeatures]:
__UpperCamelCase = {label: i for i, label in enumerate(lowercase )}
__UpperCamelCase = []
for ex_index, example in enumerate(lowercase ):
if ex_index % 1_0_0_0_0 == 0:
logger.info("""Writing example %d of %d""" , lowercase , len(lowercase ) )
__UpperCamelCase = []
__UpperCamelCase = []
for word, label in zip(example.words , example.labels ):
__UpperCamelCase = tokenizer.tokenize(lowercase )
# bert-base-multilingual-cased sometimes outputs "nothing" ([]) when calling tokenize with just a space.
if len(lowercase ) > 0:
tokens.extend(lowercase )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(lowercase ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
__UpperCamelCase = tokenizer.num_special_tokens_to_add()
if len(lowercase ) > max_seq_length - special_tokens_count:
__UpperCamelCase = tokens[: (max_seq_length - special_tokens_count)]
__UpperCamelCase = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
__UpperCamelCase = [sequence_a_segment_id] * len(lowercase )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
__UpperCamelCase = [cls_token] + tokens
__UpperCamelCase = [pad_token_label_id] + label_ids
__UpperCamelCase = [cls_token_segment_id] + segment_ids
__UpperCamelCase = tokenizer.convert_tokens_to_ids(lowercase )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
__UpperCamelCase = [1 if mask_padding_with_zero else 0] * len(lowercase )
# Zero-pad up to the sequence length.
__UpperCamelCase = max_seq_length - len(lowercase )
if pad_on_left:
__UpperCamelCase = ([pad_token] * padding_length) + input_ids
__UpperCamelCase = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
__UpperCamelCase = ([pad_token_segment_id] * padding_length) + segment_ids
__UpperCamelCase = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(lowercase ) == max_seq_length
assert len(lowercase ) == max_seq_length
assert len(lowercase ) == max_seq_length
assert len(lowercase ) == max_seq_length
if ex_index < 5:
logger.info("""*** Example ***""" )
logger.info("""guid: %s""" , example.guid )
logger.info("""tokens: %s""" , """ """.join([str(lowercase ) for x in tokens] ) )
logger.info("""input_ids: %s""" , """ """.join([str(lowercase ) for x in input_ids] ) )
logger.info("""input_mask: %s""" , """ """.join([str(lowercase ) for x in input_mask] ) )
logger.info("""segment_ids: %s""" , """ """.join([str(lowercase ) for x in segment_ids] ) )
logger.info("""label_ids: %s""" , """ """.join([str(lowercase ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
__UpperCamelCase = None
features.append(
InputFeatures(
input_ids=lowercase , attention_mask=lowercase , token_type_ids=lowercase , label_ids=lowercase ) )
return features
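# Illustrative label alignment (hypothetical token ids): if the word "Hugging"
# with label "B-ORG" tokenizes into ["Hu", "##gging"], only "Hu" receives
# label_map["B-ORG"]; "##gging" gets pad_token_label_id (-100 by default), so
# it is ignored by the loss.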
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = nn.CrossEntropyLoss().ignore_index
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase = None , lowercase=False , lowercase = Split.train , ) -> str:
# Load data features from cache or dataset file
__UpperCamelCase = os.path.join(
lowercase , """cached_{}_{}_{}""".format(mode.value , tokenizer.__class__.__name__ , str(lowercase ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__UpperCamelCase = cached_features_file + """.lock"""
with FileLock(lowercase ):
if os.path.exists(lowercase ) and not overwrite_cache:
logger.info(f"Loading features from cached file {cached_features_file}" )
__UpperCamelCase = torch.load(lowercase )
else:
logger.info(f"Creating features from dataset file at {data_dir}" )
__UpperCamelCase = token_classification_task.read_examples_from_file(lowercase , lowercase )
# TODO clean up all this to leverage built-in features of tokenizers
__UpperCamelCase = token_classification_task.convert_examples_to_features(
lowercase , lowercase , lowercase , lowercase , cls_token_at_end=bool(model_type in ["""xlnet"""] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=lowercase , pad_on_left=bool(tokenizer.padding_side == """left""" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(f"Saving features into cached file {cached_features_file}" )
torch.save(self.features , lowercase )
def __len__( self ) -> List[Any]:
return len(self.features )
def __getitem__( self , lowercase ) -> InputFeatures:
return self.features[i]
if is_tf_available():
import tensorflow as tf
class UpperCAmelCase__ :
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = -1_0_0
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase = None , lowercase=False , lowercase = Split.train , ) -> List[Any]:
__UpperCamelCase = token_classification_task.read_examples_from_file(lowercase , lowercase )
# TODO clean up all this to leverage built-in features of tokenizers
__UpperCamelCase = token_classification_task.convert_examples_to_features(
lowercase , lowercase , lowercase , lowercase , cls_token_at_end=bool(model_type in ["""xlnet"""] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=lowercase , pad_on_left=bool(tokenizer.padding_side == """left""" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
__UpperCamelCase = tf.data.Dataset.from_generator(
lowercase , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa}, tf.intaa) , (
{"""input_ids""": tf.TensorShape([None] ), """attention_mask""": tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
__UpperCamelCase = tf.data.Dataset.from_generator(
lowercase , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa, """token_type_ids""": tf.intaa}, tf.intaa) , (
{
"""input_ids""": tf.TensorShape([None] ),
"""attention_mask""": tf.TensorShape([None] ),
"""token_type_ids""": tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def __lowerCamelCase ( self ) -> Tuple:
__UpperCamelCase = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self ) -> str:
return len(self.features )
def __getitem__( self , lowercase ) -> InputFeatures:
return self.features[i]
| 349 |
'''simple docstring'''
import re
def _lowercase ( __A ):
'''simple docstring'''
return [char.split() for char in re.split(R"""[^ a-z A-Z 0-9 \s]""" ,__A )]
def _lowercase ( __A ):
'''simple docstring'''
__UpperCamelCase = split_input(__A )
return "".join(
["""""".join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def _lowercase ( __A ,__A ,__A ):
'''simple docstring'''
try:
__UpperCamelCase = split_input(__A )
if upper:
__UpperCamelCase = """""".join(
[
separator.join([char.upper() for char in sub_str] )
for sub_str in string_split
] )
else:
__UpperCamelCase = """""".join(
[
separator.join([char.lower() for char in sub_str] )
for sub_str in string_split
] )
return res_str
except IndexError:
return "not valid string"
def _lowercase ( __A ):
'''simple docstring'''
return to_simple_case(__A )
def _lowercase ( __A ):
'''simple docstring'''
try:
__UpperCamelCase = to_simple_case(__A )
return res_str[0].lower() + res_str[1:]
except IndexError:
return "not valid string"
def _lowercase ( __A ,__A ):
'''simple docstring'''
return to_complex_case(__A ,__A ,"""_""" )
def _lowercase ( __A ,__A ):
'''simple docstring'''
return to_complex_case(__A ,__A ,"""-""" )
if __name__ == "__main__":
__import__('doctest').testmod()
| 349 | 1 |
'''simple docstring'''
import argparse
import copy
def __lowercase ( __lowercase ) -> Optional[int]:
'''simple docstring'''
_A = {}
with open(__lowercase ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
_A = []
_list.append([line.split()[1], line.split()[2]] )
_A = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
_A = []
_list.append([line.split()[0], line.split()[2]] )
_A = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def __lowercase ( __lowercase , __lowercase ) -> Optional[int]:
'''simple docstring'''
with open(__lowercase ) as f:
_A = f.read(1 )
_A = start_node
_A = []
_A = start_node
_A = 0
while visiting not in first_solution:
_A = 1_0000
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(__lowercase ) and k[0] not in first_solution:
_A = k[1]
_A = k[0]
first_solution.append(__lowercase )
_A = distance_of_first_solution + int(__lowercase )
_A = best_node
first_solution.append(__lowercase )
_A = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
_A = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 1_0000
)
return first_solution, distance_of_first_solution
def __lowercase ( __lowercase , __lowercase ) -> int:
'''simple docstring'''
_A = []
for n in solution[1:-1]:
_A = solution.index(__lowercase )
for kn in solution[1:-1]:
_A = solution.index(__lowercase )
if n == kn:
continue
_A = copy.deepcopy(__lowercase )
_A = kn
_A = n
_A = 0
for k in _tmp[:-1]:
_A = _tmp[_tmp.index(__lowercase ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
_A = distance + int(i[1] )
_tmp.append(__lowercase )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
_A = len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda __lowercase : __lowercase[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Optional[int]:
'''simple docstring'''
_A = 1
_A = first_solution
_A = []
_A = distance_of_first_solution
_A = solution
while count <= iters:
_A = find_neighborhood(__lowercase , __lowercase )
_A = 0
_A = neighborhood[index_of_best_solution]
_A = len(__lowercase ) - 1
_A = False
while not found:
_A = 0
while i < len(__lowercase ):
if best_solution[i] != solution[i]:
_A = best_solution[i]
_A = solution[i]
break
_A = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
_A = True
_A = best_solution[:-1]
_A = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
_A = cost
_A = solution
else:
_A = index_of_best_solution + 1
_A = neighborhood[index_of_best_solution]
if len(__lowercase ) >= size:
tabu_list.pop(0 )
_A = count + 1
return best_solution_ever, best_cost
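# Intuition (illustrative): a neighbour of tour [a, b, c, d, a] swaps two inner
# nodes, e.g. yielding [a, c, b, d, a]; the swapped pair is then recorded in
# the tabu list so the move cannot be immediately undone, and once the list
# grows past `size` the oldest entry is evicted FIFO-style via tabu_list.pop(0).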
def __lowercase ( __lowercase=None ) -> Optional[Any]:
'''simple docstring'''
_A = generate_neighbours(args.File )
_A , _A = generate_first_solution(
args.File , __lowercase )
_A , _A = tabu_search(
__lowercase , __lowercase , __lowercase , args.Iterations , args.Size , )
print(F'''Best solution: {best_sol}, with total distance: {best_cost}.''' )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser(description='''Tabu Search''')
parser.add_argument(
'''-f''',
'''--File''',
type=str,
help='''Path to the file containing the data''',
required=True,
)
parser.add_argument(
'''-i''',
'''--Iterations''',
type=int,
help='''How many iterations the algorithm should perform''',
required=True,
)
parser.add_argument(
'''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 354 |
'''simple docstring'''
from __future__ import annotations
from math import pow, sqrt
def __lowercase ( __lowercase , __lowercase , __lowercase ) -> dict[str, float]:
'''simple docstring'''
if (resistance, reactance, impedance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance == 0:
return {"resistance": sqrt(pow(__lowercase , 2 ) - pow(__lowercase , 2 ) )}
elif reactance == 0:
return {"reactance": sqrt(pow(__lowercase , 2 ) - pow(__lowercase , 2 ) )}
elif impedance == 0:
return {"impedance": sqrt(pow(__lowercase , 2 ) + pow(__lowercase , 2 ) )}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 174 | 0 |
'''simple docstring'''
from manim import *
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def UpperCAmelCase_ ( self ) -> Tuple:
lowerCAmelCase__ : Dict = Rectangle(height=0.5 ,width=0.5 )
lowerCAmelCase__ : Any = Rectangle(height=0.2_5 ,width=0.2_5 )
lowerCAmelCase__ : List[str] = Rectangle(height=0.4_6 ,width=0.4_6 ).set_stroke(width=0 )
lowerCAmelCase__ : Any = [mem.copy() for i in range(6 )]
lowerCAmelCase__ : List[str] = [mem.copy() for i in range(6 )]
lowerCAmelCase__ : str = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase ,buff=0 )
lowerCAmelCase__ : Optional[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase ,buff=0 )
lowerCAmelCase__ : List[Any] = VGroup(__UpperCAmelCase ,__UpperCAmelCase ).arrange(__UpperCAmelCase ,buff=0 )
lowerCAmelCase__ : Optional[Any] = Text("""CPU""" ,font_size=24 )
lowerCAmelCase__ : str = Group(__UpperCAmelCase ,__UpperCAmelCase ).arrange(__UpperCAmelCase ,buff=0.5 ,aligned_edge=__UpperCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__UpperCAmelCase )
lowerCAmelCase__ : str = [mem.copy() for i in range(4 )]
lowerCAmelCase__ : List[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase ,buff=0 )
lowerCAmelCase__ : Optional[int] = Text("""GPU""" ,font_size=24 )
lowerCAmelCase__ : Union[str, Any] = Group(__UpperCAmelCase ,__UpperCAmelCase ).arrange(__UpperCAmelCase ,buff=0.5 ,aligned_edge=__UpperCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(__UpperCAmelCase )
lowerCAmelCase__ : Tuple = [mem.copy() for i in range(6 )]
lowerCAmelCase__ : List[str] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase ,buff=0 )
lowerCAmelCase__ : Optional[Any] = Text("""Model""" ,font_size=24 )
lowerCAmelCase__ : Optional[Any] = Group(__UpperCAmelCase ,__UpperCAmelCase ).arrange(__UpperCAmelCase ,buff=0.5 ,aligned_edge=__UpperCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(__UpperCAmelCase )
lowerCAmelCase__ : Any = []
lowerCAmelCase__ : Any = []
lowerCAmelCase__ : List[Any] = []
for i, rect in enumerate(__UpperCAmelCase ):
rect.set_stroke(__UpperCAmelCase )
lowerCAmelCase__ : str = Rectangle(height=0.4_6 / 4 ,width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(__UpperCAmelCase ,opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) ,buff=0.0_2 ,direction=__UpperCAmelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] ,direction=__UpperCAmelCase ,buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] ,direction=__UpperCAmelCase ,buff=0.0 )
self.add(__UpperCAmelCase )
model_cpu_arr.append(__UpperCAmelCase )
self.add(*__UpperCAmelCase ,*__UpperCAmelCase ,*__UpperCAmelCase )
lowerCAmelCase__ : Dict = [mem.copy() for i in range(6 )]
lowerCAmelCase__ : Any = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase ,buff=0 )
lowerCAmelCase__ : int = Text("""Loaded Checkpoint""" ,font_size=24 )
lowerCAmelCase__ : str = Group(__UpperCAmelCase ,__UpperCAmelCase ).arrange(__UpperCAmelCase ,buff=0.5 ,aligned_edge=__UpperCAmelCase )
checkpoint.move_to([3, 0.5, 0] )
self.add(__UpperCAmelCase )
lowerCAmelCase__ : List[str] = []
lowerCAmelCase__ : List[str] = []
for i, rect in enumerate(__UpperCAmelCase ):
lowerCAmelCase__ : List[Any] = fill.copy().set_fill(__UpperCAmelCase ,opacity=0.7 )
target.move_to(__UpperCAmelCase )
ckpt_arr.append(__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(__UpperCAmelCase )
self.add(*__UpperCAmelCase ,*__UpperCAmelCase )
lowerCAmelCase__ : str = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCAmelCase__ : Any = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
self.add(__UpperCAmelCase ,__UpperCAmelCase )
lowerCAmelCase__ : int = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" ,font_size=18 ,)
blue_text.next_to(__UpperCAmelCase ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
self.add(__UpperCAmelCase )
lowerCAmelCase__ : str = MarkupText(
F"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
lowerCAmelCase__ : List[Any] = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase__ : str = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase__ : Union[str, Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase ,buff=0 )
lowerCAmelCase__ : Dict = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase ,buff=0 )
lowerCAmelCase__ : Union[str, Any] = VGroup(__UpperCAmelCase ,__UpperCAmelCase ).arrange(__UpperCAmelCase ,buff=0 )
lowerCAmelCase__ : Tuple = Text("""Disk""" ,font_size=24 )
lowerCAmelCase__ : Optional[Any] = Group(__UpperCAmelCase ,__UpperCAmelCase ).arrange(__UpperCAmelCase ,buff=0.5 ,aligned_edge=__UpperCAmelCase )
disk.move_to([-4.0, -1.2_5, 0] )
self.play(Write(__UpperCAmelCase ,run_time=3 ) ,Write(__UpperCAmelCase ,run_time=1 ) ,Create(__UpperCAmelCase ,run_time=1 ) )
lowerCAmelCase__ : List[Any] = []
for i, rect in enumerate(__UpperCAmelCase ):
lowerCAmelCase__ : Optional[int] = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(__UpperCAmelCase ,run_time=1.5 ) )
self.play(*__UpperCAmelCase )
self.play(FadeOut(__UpperCAmelCase ) )
lowerCAmelCase__ : Tuple = MarkupText(F"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" ,font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__UpperCAmelCase ,run_time=3 ) )
self.play(
FadeOut(__UpperCAmelCase ,__UpperCAmelCase ,*__UpperCAmelCase ,*__UpperCAmelCase ) ,)
self.wait()
| 37 | import qiskit
def UpperCAmelCase ( lowercase , lowercase ):
"""simple docstring"""
__lowercase = qiskit.Aer.get_backend('''aer_simulator''' )
# Create a Quantum Circuit acting on the q register
__lowercase = qiskit.QuantumCircuit(lowercase , lowercase )
# Map the quantum measurement to the classical bits
circuit.measure([0] , [0] )
# Execute the circuit on the simulator
__lowercase = qiskit.execute(lowercase , lowercase , shots=1000 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(lowercase )
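# Expected outcome: no gates are applied before measuring qubit 0, so every
# one of the 1000 shots observes |0> and the histogram is {'0': 1000}.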
if __name__ == "__main__":
print(F'''Total count for various states are: {single_qubit_measure(1, 1)}''')
| 210 | 0 |
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _A ( self : List[Any] , UpperCAmelCase_ : Dict ):
SCREAMING_SNAKE_CASE : Optional[int] = 3
SCREAMING_SNAKE_CASE : Optional[int] = 250
SCREAMING_SNAKE_CASE : int = ids_tensor((batch_size, length) , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Dict = torch.ones((batch_size, length) , device=UpperCAmelCase_ , dtype=torch.float ) / length
return input_ids, scores
def _A ( self : Any ):
SCREAMING_SNAKE_CASE : Union[str, Any] = self._get_tensors(5 )
SCREAMING_SNAKE_CASE : str = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) )
SCREAMING_SNAKE_CASE : str = self._get_tensors(9 )
self.assertFalse(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) )
SCREAMING_SNAKE_CASE : List[str] = self._get_tensors(10 )
self.assertTrue(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) )
def _A ( self : int ):
SCREAMING_SNAKE_CASE : Tuple = MaxLengthCriteria(max_length=10 )
SCREAMING_SNAKE_CASE : List[Any] = self._get_tensors(5 )
self.assertFalse(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) )
SCREAMING_SNAKE_CASE : int = self._get_tensors(9 )
self.assertFalse(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) )
SCREAMING_SNAKE_CASE : int = self._get_tensors(10 )
self.assertTrue(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) )
def _A ( self : Any ):
SCREAMING_SNAKE_CASE : int = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
SCREAMING_SNAKE_CASE : str = self._get_tensors(5 )
self.assertFalse(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) )
SCREAMING_SNAKE_CASE : int = self._get_tensors(9 )
self.assertFalse(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) )
SCREAMING_SNAKE_CASE : str = self._get_tensors(10 )
self.assertTrue(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) )
SCREAMING_SNAKE_CASE : List[Any] = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
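# Note: MaxNewTokensCriteria(start_length=5, max_new_tokens=5) stops at a total
# length of 10, which is why the wrapping StoppingCriteriaList reports
# max_length == 10 above.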
def _A ( self : int ):
SCREAMING_SNAKE_CASE : Optional[Any] = self._get_tensors(5 )
SCREAMING_SNAKE_CASE : List[str] = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) )
SCREAMING_SNAKE_CASE : List[str] = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) )
def _A ( self : Union[str, Any] ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(UpperCAmelCase_ ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
SCREAMING_SNAKE_CASE : Dict = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(UpperCAmelCase_ ) , 1 )
| 367 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Dict = '''timm_backbone'''
def __init__( self : List[Any] , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : Union[str, Any]=None , **UpperCAmelCase_ : Optional[Any] , ):
super().__init__(**UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Dict = backbone
SCREAMING_SNAKE_CASE : List[str] = num_channels
SCREAMING_SNAKE_CASE : Optional[Any] = features_only
SCREAMING_SNAKE_CASE : Dict = use_pretrained_backbone
SCREAMING_SNAKE_CASE : Optional[int] = True
SCREAMING_SNAKE_CASE : List[Any] = out_indices if out_indices is not None else (-1,)
| 319 | 0 |
def A ( lowercase ) -> bool:
'''simple docstring'''
if not all(x.isalpha() for x in string ):
raise ValueError('String must only contain alphabetic characters.' )
UpperCamelCase = sorted(string.lower() )
return len(lowercase ) == len(set(lowercase ) )
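# Examples: "Uncopyrightable" has no repeated letters, so it is an isogram;
# "allowance" repeats both 'a' and 'l', so it is not.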
if __name__ == "__main__":
_UpperCAmelCase : List[Any] = input("Enter a string ").strip()
_UpperCAmelCase : Optional[int] = is_isogram(input_str)
print(F'''{input_str} is {'an' if isogram else 'not an'} isogram.''')
| 222 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase : List[Any] = logging.get_logger(__name__)
_UpperCAmelCase : int = {"vocab_file": "vocab.txt"}
_UpperCAmelCase : str = {
"vocab_file": {
"openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
},
}
_UpperCAmelCase : Optional[Any] = {
"openbmb/cpm-ant-10b": 1_024,
}
def A ( lowercase ) -> Dict:
'''simple docstring'''
UpperCamelCase = collections.OrderedDict()
with open(lowercase , 'r' , encoding='utf-8' ) as reader:
UpperCamelCase = reader.readlines()
for index, token in enumerate(lowercase ):
UpperCamelCase = token.rstrip('\n' )
UpperCamelCase = index
return vocab
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ , A_="<unk>" , A_=200 ) -> Dict:
"""simple docstring"""
UpperCamelCase = vocab
UpperCamelCase = unk_token
UpperCamelCase = max_input_chars_per_word
def __UpperCamelCase ( self , A_ ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = list(A_ )
if len(A_ ) > self.max_input_chars_per_word:
return [self.unk_token]
UpperCamelCase = 0
UpperCamelCase = []
while start < len(A_ ):
UpperCamelCase = len(A_ )
UpperCamelCase = None
while start < end:
UpperCamelCase = ''.join(chars[start:end] )
if substr in self.vocab:
UpperCamelCase = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(A_ )
UpperCamelCase = end
return sub_tokens
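# Greedy longest-match example (hypothetical vocab): with a vocab containing
# {"play", "ing", "playing"}, the loop above emits ["playing"] for the input
# "playing" rather than ["play", "ing"], because it always tries the longest
# substring starting at `start` first.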
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : List[str] = VOCAB_FILES_NAMES
__lowercase : Dict = PRETRAINED_VOCAB_FILES_MAP
__lowercase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : Any = ["input_ids", "attention_mask"]
__lowercase : Tuple = False
def __init__( self , A_ , A_="<d>" , A_="</d>" , A_="<s>" , A_="</s>" , A_="<pad>" , A_="<unk>" , A_="</n>" , A_="</_>" , A_="left" , **A_ , ) -> Tuple:
"""simple docstring"""
requires_backends(self , ['jieba'] )
super().__init__(
bod_token=A_ , eod_token=A_ , bos_token=A_ , eos_token=A_ , pad_token=A_ , unk_token=A_ , line_token=A_ , space_token=A_ , padding_side=A_ , **A_ , )
UpperCamelCase = bod_token
UpperCamelCase = eod_token
UpperCamelCase = load_vocab(A_ )
UpperCamelCase = self.encoder[space_token]
UpperCamelCase = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
UpperCamelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda A_ : A_[1] ) )
UpperCamelCase = {v: k for k, v in self.encoder.items()}
UpperCamelCase = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
return self.encoder[self.bod_token]
@property
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
return self.encoder[self.eod_token]
@property
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
return self.encoder["\n"]
@property
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
return len(self.encoder )
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def __UpperCamelCase ( self , A_ ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = []
for x in jieba.cut(A_ , cut_all=A_ ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(A_ ) )
return output_tokens
def __UpperCamelCase ( self , A_ , **A_ ) -> Dict:
"""simple docstring"""
UpperCamelCase = [i for i in token_ids if i >= 0]
UpperCamelCase = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(A_ , **A_ )
def __UpperCamelCase ( self , A_ ) -> int:
"""simple docstring"""
return token in self.encoder
def __UpperCamelCase ( self , A_ ) -> str:
"""simple docstring"""
return "".join(A_ )
def __UpperCamelCase ( self , A_ ) -> Tuple:
"""simple docstring"""
return self.encoder.get(A_ , self.encoder.get(self.unk_token ) )
def __UpperCamelCase ( self , A_ ) -> Optional[Any]:
"""simple docstring"""
return self.decoder.get(A_ , self.unk_token )
def __UpperCamelCase ( self , A_ , A_ = None ) -> Tuple[str]:
"""simple docstring"""
if os.path.isdir(A_ ):
UpperCamelCase = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
else:
UpperCamelCase = (filename_prefix + '-' if filename_prefix else '') + save_directory
UpperCamelCase = 0
if " " in self.encoder:
UpperCamelCase = self.encoder[' ']
del self.encoder[" "]
if "\n" in self.encoder:
UpperCamelCase = self.encoder['\n']
del self.encoder["\n"]
UpperCamelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda A_ : A_[1] ) )
with open(A_ , 'w' , encoding='utf-8' ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
F'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
' Please check that the vocabulary is not corrupted!' )
UpperCamelCase = token_index
writer.write(token + '\n' )
index += 1
return (vocab_file,)
    def __UpperCamelCase ( self , token_ids_a , token_ids_b = None ) -> List[int]:
        """simple docstring"""
        if token_ids_b is None:
            return [self.bos_token_id] + token_ids_a
        return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_b

    def __UpperCamelCase ( self , token_ids_a , token_ids_b = None , already_has_special_tokens = False ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=already_has_special_tokens )
        if token_ids_b is not None:
            return [1] + ([0] * len(token_ids_a )) + [1] + ([0] * len(token_ids_b ))
        return [1] + ([0] * len(token_ids_a ))
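# Illustrative sketch (added, not from the original file): the tokenizer above first
# pre-segments text with jieba, then runs WordPiece over each segment. The toy vocab
# and the use of BERT's WordpieceTokenizer below are stand-ins for the real components:
#
#     import jieba
#     from transformers.models.bert.tokenization_bert import WordpieceTokenizer
#
#     vocab = {"<unk>": 0, "你好": 1, "世界": 2}  # hypothetical toy vocabulary
#     wordpiece = WordpieceTokenizer(vocab=vocab, unk_token="<unk>")
#     tokens = []
#     for segment in jieba.cut("你好世界", cut_all=False):  # typically ["你好", "世界"]
#         tokens.extend(wordpiece.tokenize(segment))
#     # tokens == ["你好", "世界"]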
| 222 | 1 |
from __future__ import annotations

arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """
    Next Greatest Element (NGE) for each element, O(n^2) nested scan.

    >>> next_greatest_element_slow(arr) == expect
    True
    """
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_item: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_item = arr[j]
                break
        result.append(next_item)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """
    Same quadratic idea, written with enumerate and slicing.

    >>> next_greatest_element_fast(arr) == expect
    True
    """
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """
    O(n) monotonic-stack solution, scanning from the right.

    >>> next_greatest_element(arr) == expect
    True
    """
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size
    for index in reversed(range(arr_size)):
        if stack:
            # pop everything not greater than the current element
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        " next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
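# Added sanity check: each position maps to the first strictly greater element on
# its right, or -1 when none exists; all three implementations must agree.
assert next_greatest_element([2, 1, 5, 3]) == [5, 5, -1, -1]
assert next_greatest_element_slow(arr) == next_greatest_element_fast(arr) == next_greatest_element(arr) == expect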
| 75 |
__A = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
__A = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
__A = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
__A = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
__A = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
__A = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
__A = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
__A = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
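# Editorial note (assumption): these constant arrays look like precomputed, descending
# timestep schedules of the kind used in diffusion-scheduler tests -- each runs from
# 999 down to 0 with a different spacing and step count. A hypothetical generator for
# the simple evenly spaced case:
import numpy as np


def evenly_spaced_timesteps(num_train_timesteps: int = 1000, num_inference_steps: int = 27) -> list[int]:
    # evenly spaced values in [0, num_train_timesteps - 1], highest first
    steps = np.linspace(0, num_train_timesteps - 1, num_inference_steps)
    return [int(round(t)) for t in steps][::-1]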
| 75 | 1 |
"""Simulated annealing search over a two-dimensional SearchProblem."""
import math
import random
from typing import Any

from .hill_climbing import SearchProblem


def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel('Iterations')
        plt.ylabel('Function values')
        plt.show()
    return best_state


if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_max.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_max.score()}"
    )
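# Added illustration: the acceptance rule above is the Metropolis criterion. An
# improving move is always taken; a worsening one (change < 0 after the find_max
# sign flip) is taken with probability e ** (change / temperature), which shrinks
# as the temperature decays.
def metropolis_accept(change: float, temperature: float) -> bool:
    if change > 0:
        return True
    return random.random() < math.e ** (change / temperature)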
| 151 |
"""simple docstring"""
from __future__ import annotations
def carrier_concentration( electron_conc: float , hole_conc: float , intrinsic_conc: float , ) -> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
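# Worked example (added; illustrative values): by the mass-action law n * p = n_i**2,
# an n-type silicon sample with n = 1e16 and n_i = 1.5e10 (both in cm^-3) has
# p = (1.5e10)**2 / 1e16 = 2.25e4 cm^-3:
#
#     >>> carrier_concentration(electron_conc=1e16, hole_conc=0, intrinsic_conc=1.5e10)
#     ('hole_conc', 22500.0)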
| 247 | 0 |
"""simple docstring"""
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
lowercase__ : int = logging.get_logger(__name__)
lowercase__ : Any = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast( tokenizer_name , checkpoint_name , dump_path , force_download ):
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}." )
if tokenizer_name is None:
snake_case_ : Dict = TOKENIZER_CLASSES
else:
        snake_case_ : Union[str, Any] = {tokenizer_name: getattr(transformers , tokenizer_name + '''Fast''' )}
logger.info(f"Loading tokenizer classes: {tokenizer_names}" )
for tokenizer_name in tokenizer_names:
snake_case_ : int = TOKENIZER_CLASSES[tokenizer_name]
snake_case_ : Union[str, Any] = True
if checkpoint_name is None:
snake_case_ : Dict = list(tokenizer_class.max_model_input_sizes.keys() )
else:
snake_case_ : List[str] = [checkpoint_name]
logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}" )
for checkpoint in checkpoint_names:
logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}" )
# Load tokenizer
        tokenizer = tokenizer_class.from_pretrained(checkpoint , force_download=force_download )
# Save fast tokenizer
logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}" )
# For organization names we create sub-directories
if "/" in checkpoint:
snake_case_, snake_case_ : List[str] = checkpoint.split('''/''' )
snake_case_ : int = os.path.join(_a , _a )
elif add_prefix:
snake_case_ : List[Any] = checkpoint
snake_case_ : Dict = dump_path
else:
snake_case_ : List[Any] = None
snake_case_ : str = dump_path
logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
snake_case_ : Dict = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
snake_case_ : Tuple = file_path.split(_a )[-1][0]
if next_char == "/":
snake_case_ : Dict = os.path.join(_a , _a )
snake_case_ : Any = None
logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )
snake_case_ : str = tokenizer.save_pretrained(
_a , legacy_format=_a , filename_prefix=_a )
logger.info(f"=> File names {file_names}" )
for file_name in file_names:
if not file_name.endswith('''tokenizer.json''' ):
os.remove(_a )
logger.info(f"=> removing {file_name}" )
if __name__ == "__main__":
lowercase__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
f'Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
lowercase__ : Optional[int] = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
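# Example invocation (added; the script name, checkpoint and paths are illustrative):
#
#   python convert_slow_tokenizers_checkpoints_to_fast.py \
#       --tokenizer_name BertTokenizer \
#       --checkpoint_name bert-base-uncased \
#       --dump_path /tmp/fast_tokenizers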
| 155 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
lowercase__ : Any = None
lowercase__ : List[str] = logging.get_logger(__name__)
lowercase__ : Tuple = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowercase__ : Union[str, Any] = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
'''tokenizer_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''',
},
}
lowercase__ : Any = {
'''google/rembert''': 2_56,
}
lowercase__ : Optional[Any] = '''▁'''
class _UpperCAmelCase ( lowerCAmelCase__):
_lowerCAmelCase : Tuple = VOCAB_FILES_NAMES
_lowerCAmelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase : Tuple = RemBertTokenizer
def __init__( self : Union[str, Any] , lowercase_ : List[Any]=None , lowercase_ : Optional[int]=None , lowercase_ : List[Any]=True , lowercase_ : str=True , lowercase_ : Optional[int]=False , lowercase_ : List[Any]="[CLS]" , lowercase_ : Union[str, Any]="[SEP]" , lowercase_ : str="<unk>" , lowercase_ : Tuple="[SEP]" , lowercase_ : Optional[int]="<pad>" , lowercase_ : List[Any]="[CLS]" , lowercase_ : Union[str, Any]="[MASK]" , **lowercase_ : Dict , ):
# Mask token behave like a normal word, i.e. include the space before it
snake_case_ : List[str] = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else mask_token
super().__init__(
lowercase_ , tokenizer_file=lowercase_ , do_lower_case=lowercase_ , remove_space=lowercase_ , keep_accents=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , pad_token=lowercase_ , cls_token=lowercase_ , mask_token=lowercase_ , **lowercase_ , )
snake_case_ : Optional[int] = do_lower_case
snake_case_ : List[Any] = remove_space
snake_case_ : str = keep_accents
snake_case_ : str = vocab_file
snake_case_ : Optional[int] = False if not self.vocab_file else True
    def _snake_case ( self : Any , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_b + sep

    def _snake_case ( self : str , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None , already_has_special_tokens : bool = False ):
        if already_has_special_tokens:
            if token_ids_b is not None:
                raise ValueError(
                    '''You should not supply a second sequence if the provided sequence of '''
                    '''ids is already formatted with special tokens for the model.''' )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
        if token_ids_b is not None:
            return [1] + ([0] * len(token_ids_a )) + [1] + ([0] * len(token_ids_b )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1]

    def _snake_case ( self : Dict , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
def _snake_case ( self : Optional[int] , lowercase_ : str , lowercase_ : Optional[str] = None ):
if not os.path.isdir(lowercase_ ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(lowercase_ ) )
return
snake_case_ : Optional[int] = os.path.join(
lowercase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ):
copyfile(self.vocab_file , lowercase_ )
return (out_vocab_file,)
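# Added sketch of the special-token layout built above (ids are placeholders):
# single sequence: [CLS] A [SEP]; pair: [CLS] A [SEP] B [SEP], with token type ids
# 0 over "[CLS] A [SEP]" and 1 over "B [SEP]".
cls_id, sep_id = [101], [102]  # hypothetical ids for [CLS] / [SEP]
seq_a, seq_b = [7, 8], [9]
assert cls_id + seq_a + sep_id + seq_b + sep_id == [101, 7, 8, 102, 9, 102]
assert len(cls_id + seq_a + sep_id) * [0] + len(seq_b + sep_id) * [1] == [0, 0, 0, 1, 1]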
| 155 | 1 |
def is_palindrome(num: int) -> bool:
    """Return True if `num` reads the same after reversing its digits."""
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 129 |
def prefix_function(input_string: str) -> list[int]:
    """KMP failure function: prefix_result[i] is the length of the longest proper
    prefix of input_string[: i + 1] that is also a suffix of it."""
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_string: str) -> int:
    """Length of the longest prefix that reoccurs as a suffix somewhere in the string."""
    return max(prefix_function(input_string))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 129 | 1 |
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
a : str = logging.get_logger(__name__)
a : List[str] = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
'''constant''': get_constant_schedule,
'''constant_w_warmup''': get_constant_schedule_with_warmup,
}
class __UpperCamelCase ( lowerCamelCase__ ):
def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[Any]:
super().__init__(*lowercase__ , **lowercase__ )
if config is None:
assert isinstance(self.model , lowercase__ ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
f""" {self.model.__class__}"""
)
a : str = self.model.config
else:
a : List[str] = config
a : List[str] = data_args
        a : Optional[Any] = self.config.tgt_vocab_size if isinstance(self.config , FSMTConfig ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f"""The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"""
" padding.." )
if self.args.label_smoothing == 0:
a : List[Any] = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
a : Optional[Any] = label_smoothed_nll_loss
def __a ( self , lowerCAmelCase__ ) -> str:
if self.optimizer is None:
a : List[Any] = ["bias", "LayerNorm.weight"]
a : Optional[Any] = [
{
"params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"weight_decay": 0.0,
},
]
a : List[Any] = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
a : List[Any] = Adafactor
a : Optional[Any] = {"scale_parameter": False, "relative_step": False}
else:
a : Dict = AdamW
                a : List[str] = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
a : str = self.args.learning_rate
if self.sharded_ddp:
a : List[Any] = OSS(
params=lowercase__ , optim=lowercase__ , **lowercase__ , )
else:
a : List[str] = optimizer_cls(lowercase__ , **lowercase__ )
if self.lr_scheduler is None:
a : Optional[int] = self._get_lr_scheduler(lowercase__ )
else: # ignoring --lr_scheduler
logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored." )
def __a ( self , lowerCAmelCase__ ) -> List[Any]:
a : List[str] = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
a : List[str] = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
a : Optional[Any] = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
a : Optional[Any] = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=lowercase__ )
return scheduler
def __a ( self ) -> Any:
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]:
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
a : Optional[Any] = model(**lowercase__ , use_cache=lowercase__ )[0]
a : List[str] = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
a, a : Union[str, Any] = model(**lowercase__ , labels=lowercase__ , use_cache=lowercase__ )[:2]
else:
# compute label smoothed loss
a : Optional[int] = model(**lowercase__ , use_cache=lowercase__ )[0]
a : Optional[int] = torch.nn.functional.log_softmax(lowercase__ , dim=-1 )
a, a : Tuple = self.loss_fn(lowercase__ , lowercase__ , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]:
a : int = inputs.pop("labels" )
a, a : List[str] = self._compute_loss(lowercase__ , lowercase__ , lowercase__ )
return loss
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , ) -> List[Any]:
a : List[Any] = self._prepare_inputs(lowercase__ )
a : Any = {
"max_length": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
a : Optional[Any] = self.model.generate(
inputs["input_ids"] , attention_mask=inputs["attention_mask"] , **lowercase__ , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
a : List[str] = self._pad_tensors_to_max_len(lowercase__ , gen_kwargs["max_length"] )
a : str = inputs.pop("labels" )
with torch.no_grad():
# compute loss on predict data
a, a : Union[str, Any] = self._compute_loss(lowercase__ , lowercase__ , lowercase__ )
a : Union[str, Any] = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
a : Any = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
a : str = self._pad_tensors_to_max_len(lowercase__ , gen_kwargs["max_length"] )
return (loss, logits, labels)
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
# If PAD token is not defined at least EOS token has to be defined
a : int = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
f""" padded to `max_length`={max_length}""" )
a : Any = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
a : str = tensor
return padded_tensor
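# Added sketch (assumption): the `label_smoothed_nll_loss` imported dynamically in
# __init__ matches, in the upstream legacy seq2seq examples, the fairseq-style helper
# below; shown for context, not as the authoritative implementation.
def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=-100):
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target)
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)  # uniform prior over the vocab
    if ignore_index is not None:
        pad_mask = target.eq(ignore_index)
        nll_loss.masked_fill_(pad_mask, 0.0)
        smooth_loss.masked_fill_(pad_mask, 0.0)
    else:
        nll_loss = nll_loss.squeeze(-1)
        smooth_loss = smooth_loss.squeeze(-1)
    nll_loss = nll_loss.sum()
    smooth_loss = smooth_loss.sum()
    eps_i = epsilon / lprobs.size(-1)
    loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
    return loss, nll_loss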
| 358 |
"""simple docstring"""
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()


if __name__ == "__main__":
    main()
| 79 | 0 |
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A , __A=13 , __A=30 , __A=2 , __A=3 , __A=True , __A=True , __A=32 , __A=2 , __A=4 , __A=37 , __A="gelu" , __A=0.1 , __A=0.1 , __A=10 , __A=0.0_2 , __A=3 , __A=0.6 , __A=None , ) -> Any:
lowerCAmelCase_ :Optional[int] = parent
lowerCAmelCase_ :Tuple = batch_size
lowerCAmelCase_ :List[Any] = image_size
lowerCAmelCase_ :List[str] = patch_size
lowerCAmelCase_ :List[str] = num_channels
lowerCAmelCase_ :Optional[Any] = is_training
lowerCAmelCase_ :Any = use_labels
lowerCAmelCase_ :Tuple = hidden_size
lowerCAmelCase_ :Union[str, Any] = num_hidden_layers
lowerCAmelCase_ :List[Any] = num_attention_heads
lowerCAmelCase_ :Optional[Any] = intermediate_size
lowerCAmelCase_ :List[Any] = hidden_act
lowerCAmelCase_ :Union[str, Any] = hidden_dropout_prob
lowerCAmelCase_ :Any = attention_probs_dropout_prob
lowerCAmelCase_ :Tuple = type_sequence_label_size
lowerCAmelCase_ :List[str] = initializer_range
lowerCAmelCase_ :Optional[Any] = mask_ratio
lowerCAmelCase_ :Any = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
lowerCAmelCase_ :Optional[int] = (image_size // patch_size) ** 2
lowerCAmelCase_ :str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase_ :Union[str, Any] = None
if self.use_labels:
lowerCAmelCase_ :Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ :Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self ) -> Optional[Any]:
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def __lowerCAmelCase ( self , __A , __A , __A ) -> Dict:
lowerCAmelCase_ :Union[str, Any] = TFViTMAEModel(config=_A )
lowerCAmelCase_ :str = model(_A , training=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , __A , __A , __A ) -> int:
lowerCAmelCase_ :Any = TFViTMAEForPreTraining(_A )
lowerCAmelCase_ :Optional[Any] = model(_A , training=_A )
# expected sequence length = num_patches
lowerCAmelCase_ :List[str] = (self.image_size // self.patch_size) ** 2
lowerCAmelCase_ :Optional[Any] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
lowerCAmelCase_ :str = 1
lowerCAmelCase_ :Dict = TFViTMAEForPreTraining(_A )
lowerCAmelCase_ :Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase_ :List[str] = model(_A , training=_A )
lowerCAmelCase_ :Optional[Any] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :List[Any] = self.prepare_config_and_inputs()
(lowerCAmelCase_) :Any = config_and_inputs
lowerCAmelCase_ :Optional[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class _SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , unittest.TestCase ):
UpperCAmelCase_ :List[str] = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
UpperCAmelCase_ :str = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}
UpperCAmelCase_ :Dict = False
UpperCAmelCase_ :Dict = False
UpperCAmelCase_ :List[Any] = False
UpperCAmelCase_ :Dict = False
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :List[Any] = TFViTMAEModelTester(self )
lowerCAmelCase_ :Tuple = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
pass
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ :List[Any] = model_class(_A )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowerCAmelCase_ :Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_A , tf.keras.layers.Layer ) )
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ :List[str] = model_class(_A )
lowerCAmelCase_ :Any = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ :Dict = [*signature.parameters.keys()]
lowerCAmelCase_ :Dict = ['pixel_values']
self.assertListEqual(arg_names[:1] , _A )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_A )
def __lowerCAmelCase ( self ) -> Dict:
np.random.seed(2 )
lowerCAmelCase_ :int = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ :Optional[int] = int((config.image_size // config.patch_size) ** 2 )
lowerCAmelCase_ :Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowerCAmelCase_ :Optional[Any] = model_class(_A )
lowerCAmelCase_ :Union[str, Any] = self._prepare_for_class(_A , _A )
lowerCAmelCase_ :List[str] = model(_A , noise=_A )
lowerCAmelCase_ :Tuple = copy.deepcopy(self._prepare_for_class(_A , _A ) )
lowerCAmelCase_ :str = model(**_A , noise=_A )
lowerCAmelCase_ :Union[str, Any] = outputs_dict[0].numpy()
lowerCAmelCase_ :Optional[Any] = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def __lowerCAmelCase ( self ) -> List[Any]:
np.random.seed(2 )
lowerCAmelCase_ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ :Tuple = int((config.image_size // config.patch_size) ** 2 )
lowerCAmelCase_ :Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(__A ):
lowerCAmelCase_ :Any = {}
for k, v in inputs_dict.items():
if tf.is_tensor(_A ):
lowerCAmelCase_ :str = v.numpy()
else:
lowerCAmelCase_ :Optional[Any] = np.array(_A )
return inputs_np_dict
for model_class in self.all_model_classes:
lowerCAmelCase_ :int = model_class(_A )
lowerCAmelCase_ :List[Any] = self._prepare_for_class(_A , _A )
lowerCAmelCase_ :Any = prepare_numpy_arrays(_A )
lowerCAmelCase_ :List[Any] = model(_A , noise=_A )
lowerCAmelCase_ :List[Any] = model(**_A , noise=_A )
self.assert_outputs_same(_A , _A )
def __lowerCAmelCase ( self , __A , __A , __A ) -> List[str]:
np.random.seed(2 )
lowerCAmelCase_ :Optional[int] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
lowerCAmelCase_ :Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowerCAmelCase_ :Optional[int] = tf.constant(_A )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowerCAmelCase_ :Optional[Any] = tf_noise
super().check_pt_tf_models(_A , _A , _A )
def __lowerCAmelCase ( self ) -> Dict:
np.random.seed(2 )
lowerCAmelCase_ :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ :int = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(_A )
if module_member_name.endswith("""MainLayer""" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )]
for module_member in (getattr(_A , _A ),)
if isinstance(_A , _A )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(_A , """_keras_serializable""" , _A )
}
lowerCAmelCase_ :List[Any] = int((config.image_size // config.patch_size) ** 2 )
lowerCAmelCase_ :List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowerCAmelCase_ :Optional[int] = tf.convert_to_tensor(_A )
inputs_dict.update({"""noise""": noise} )
for main_layer_class in tf_main_layer_classes:
lowerCAmelCase_ :Optional[Any] = main_layer_class(_A )
lowerCAmelCase_ :List[str] = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
lowerCAmelCase_ :Union[str, Any] = tf.keras.Model(_A , outputs=main_layer(_A ) )
lowerCAmelCase_ :int = model(_A )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase_ :List[Any] = os.path.join(_A , """keras_model.h5""" )
model.save(_A )
lowerCAmelCase_ :str = tf.keras.models.load_model(
_A , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(_A , tf.keras.Model )
lowerCAmelCase_ :List[str] = model(_A )
self.assert_outputs_same(_A , _A )
@slow
def __lowerCAmelCase ( self ) -> Union[str, Any]:
np.random.seed(2 )
lowerCAmelCase_ :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ :int = int((config.image_size // config.patch_size) ** 2 )
lowerCAmelCase_ :int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowerCAmelCase_ :Optional[Any] = model_class(_A )
lowerCAmelCase_ :Optional[Any] = self._prepare_for_class(_A , _A )
lowerCAmelCase_ :int = model(_A , noise=_A )
if model_class.__name__ == "TFViTMAEModel":
lowerCAmelCase_ :Any = outputs.last_hidden_state.numpy()
lowerCAmelCase_ :Optional[int] = 0
else:
lowerCAmelCase_ :str = outputs.logits.numpy()
lowerCAmelCase_ :Optional[Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_A , saved_model=_A )
lowerCAmelCase_ :Any = model_class.from_pretrained(_A )
lowerCAmelCase_ :Any = model(_A , noise=_A )
if model_class.__name__ == "TFViTMAEModel":
lowerCAmelCase_ :Dict = after_outputs['last_hidden_state'].numpy()
lowerCAmelCase_ :Dict = 0
else:
lowerCAmelCase_ :Any = after_outputs['logits'].numpy()
lowerCAmelCase_ :Optional[Any] = 0
lowerCAmelCase_ :Any = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_A , 1E-5 )
def __lowerCAmelCase ( self ) -> str:
np.random.seed(2 )
lowerCAmelCase_ :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ :Optional[int] = int((config.image_size // config.patch_size) ** 2 )
lowerCAmelCase_ :Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowerCAmelCase_ :str = model_class(_A )
lowerCAmelCase_ :int = self._prepare_for_class(_A , _A )
lowerCAmelCase_ :str = model(_A , noise=_A )
lowerCAmelCase_ :Dict = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(_A )
lowerCAmelCase_ :Any = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
lowerCAmelCase_ :str = model_class.from_config(model.config )
lowerCAmelCase_ :Union[str, Any] = new_model(_A ) # Build model
new_model.set_weights(model.get_weights() )
lowerCAmelCase_ :List[str] = new_model(_A , noise=_A )
self.assert_outputs_same(_A , _A )
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.""" )
def __lowerCAmelCase ( self ) -> Optional[int]:
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def __lowerCAmelCase ( self ) -> Tuple:
pass
@slow
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Optional[Any] = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" )
self.assertIsNotNone(_A )
def _snake_case ( ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ :Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def __lowerCAmelCase ( self ) -> Dict:
return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
@slow
def __lowerCAmelCase ( self ) -> Dict:
np.random.seed(2 )
lowerCAmelCase_ :List[str] = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" )
lowerCAmelCase_ :List[Any] = self.default_image_processor
lowerCAmelCase_ :Dict = prepare_img()
lowerCAmelCase_ :Optional[Any] = image_processor(images=_A , return_tensors="""tf""" )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
lowerCAmelCase_ :int = ViTMAEConfig()
lowerCAmelCase_ :List[Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
lowerCAmelCase_ :List[Any] = np.random.uniform(size=(1, num_patches) )
# forward pass
lowerCAmelCase_ :Optional[Any] = model(**_A , noise=_A )
# verify the logits
lowerCAmelCase_ :Optional[int] = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , _A )
lowerCAmelCase_ :Any = tf.convert_to_tensor(
[[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , _A , atol=1E-4 )
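# Added numeric check of the sequence-length formula used in the tester above: with
# image_size=30 and patch_size=2, num_patches = (30 // 2) ** 2 = 225, and a mask_ratio
# of 0.6 leaves ceil(0.4 * (225 + 1)) = 91 visible tokens (including [CLS]).
assert int(math.ceil((1 - 0.6) * ((30 // 2) ** 2 + 1))) == 91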
| 84 |
import tempfile
import unittest

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
    is_torch_available,
    require_optimum,
    require_torch,
    slow,
)


if is_torch_available():
    import torch


@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self) -> None:
        """Round trip: to_bettertransformer -> generate -> reverse -> save/reload."""
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        inp = tokenizer("This is me", return_tensors="pt")
        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        output = model.generate(**inp)
        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )
            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self) -> None:
        """save_pretrained must refuse to serialize a transformed model."""
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()
        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)
            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
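# Usage note (added): `to_bettertransformer()` comes from the optimum integration and
# swaps attention blocks for fused implementations; `reverse_bettertransformer()` must
# be called before `save_pretrained`, since saving a transformed model raises (as the
# second test asserts).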
| 327 | 0 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
snake_case_ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-classification/requirements.txt''')
snake_case_ = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
snake_case_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def snake_case__ ( SCREAMING_SNAKE_CASE_ : str ):
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE_ , 'rb' ) as f:
lowercase__ : List[str] = Image.open(SCREAMING_SNAKE_CASE_ )
return im.convert('RGB' )
@dataclass
class SCREAMING_SNAKE_CASE__ :
__lowerCamelCase : Optional[str] = field(
default=__snake_case , metadata={
"""help""": """Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."""
} , )
__lowerCamelCase : Optional[str] = field(
default=__snake_case , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
__lowerCamelCase : Optional[str] = field(default=__snake_case , metadata={"""help""": """A folder containing the training data."""} )
__lowerCamelCase : Optional[str] = field(default=__snake_case , metadata={"""help""": """A folder containing the validation data."""} )
__lowerCamelCase : Optional[float] = field(
default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} )
__lowerCamelCase : Optional[int] = field(
default=__snake_case , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
__lowerCamelCase : Optional[int] = field(
default=__snake_case , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def snake_case_ ( self):
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
'You must specify either a dataset name from the hub or a train and/or validation directory.')
@dataclass
class SCREAMING_SNAKE_CASE__ :
__lowerCamelCase : str = field(
default="""google/vit-base-patch16-224-in21k""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , )
__lowerCamelCase : Optional[str] = field(
default=__snake_case , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(__snake_case )} , )
__lowerCamelCase : Optional[str] = field(
default=__snake_case , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__lowerCamelCase : Optional[str] = field(
default=__snake_case , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} )
__lowerCamelCase : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
__lowerCamelCase : str = field(default=__snake_case , metadata={"""help""": """Name or path of preprocessor config."""} )
__lowerCamelCase : bool = field(
default=__snake_case , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
__lowerCamelCase : bool = field(
default=__snake_case , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , )
def snake_case__ ( SCREAMING_SNAKE_CASE_ : int ):
'''simple docstring'''
lowercase__ : str = torch.stack([example['pixel_values'] for example in examples] )
lowercase__ : List[Any] = torch.tensor([example['labels'] for example in examples] )
return {"pixel_values": pixel_values, "labels": labels}
def snake_case__ ( ):
'''simple docstring'''
lowercase__ : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase__ : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase__ : Dict = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_image_classification' , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowercase__ : Optional[Any] = training_args.get_process_log_level()
logger.setLevel(SCREAMING_SNAKE_CASE_ )
transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
lowercase__ : Union[str, Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase__ : List[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
lowercase__ : Any = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task='image-classification' , use_auth_token=True if model_args.use_auth_token else None , )
else:
lowercase__ : List[Any] = {}
if data_args.train_dir is not None:
lowercase__ : List[str] = os.path.join(data_args.train_dir , '**' )
if data_args.validation_dir is not None:
lowercase__ : Optional[Any] = os.path.join(data_args.validation_dir , '**' )
lowercase__ : Optional[int] = load_dataset(
'imagefolder' , data_files=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir , task='image-classification' , )
# If we don't have a validation split, split off a percentage of train as validation.
lowercase__ : Any = None if 'validation' in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , SCREAMING_SNAKE_CASE_ ) and data_args.train_val_split > 0.0:
lowercase__ : Optional[int] = dataset['train'].train_test_split(data_args.train_val_split )
lowercase__ : Tuple = split['train']
lowercase__ : Union[str, Any] = split['test']
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
lowercase__ : Any = dataset['train'].features['labels'].names
lowercase__ : int = {}, {}
for i, label in enumerate(SCREAMING_SNAKE_CASE_ ):
lowercase__ : Union[str, Any] = str(SCREAMING_SNAKE_CASE_ )
lowercase__ : Dict = label
# Load the accuracy metric from the datasets package
lowercase__ : Dict = evaluate.load('accuracy' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(SCREAMING_SNAKE_CASE_ : Dict ):
return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
lowercase__ : Dict = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(SCREAMING_SNAKE_CASE_ ) , labelaid=SCREAMING_SNAKE_CASE_ , idalabel=SCREAMING_SNAKE_CASE_ , finetuning_task='image-classification' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowercase__ : Dict = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
lowercase__ : Optional[Any] = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
lowercase__ : Tuple = image_processor.size['shortest_edge']
else:
lowercase__ : Optional[Any] = (image_processor.size['height'], image_processor.size['width'])
lowercase__ : Union[str, Any] = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
lowercase__ : Any = Compose(
[
RandomResizedCrop(SCREAMING_SNAKE_CASE_ ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
lowercase__ : List[str] = Compose(
[
Resize(SCREAMING_SNAKE_CASE_ ),
CenterCrop(SCREAMING_SNAKE_CASE_ ),
ToTensor(),
normalize,
] )
def train_transforms(SCREAMING_SNAKE_CASE_ : Dict ):
lowercase__ : str = [
_train_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['image']
]
return example_batch
def val_transforms(SCREAMING_SNAKE_CASE_ : List[str] ):
lowercase__ : Dict = [_val_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['image']]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
lowercase__ : str = (
dataset['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(SCREAMING_SNAKE_CASE_ )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
lowercase__ : Optional[Any] = (
dataset['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(SCREAMING_SNAKE_CASE_ )
# Initalize our trainer
lowercase__ : Tuple = Trainer(
model=SCREAMING_SNAKE_CASE_ , args=SCREAMING_SNAKE_CASE_ , train_dataset=dataset['train'] if training_args.do_train else None , eval_dataset=dataset['validation'] if training_args.do_eval else None , compute_metrics=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , data_collator=SCREAMING_SNAKE_CASE_ , )
# Training
if training_args.do_train:
lowercase__ : Optional[int] = None
if training_args.resume_from_checkpoint is not None:
lowercase__ : Union[str, Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowercase__ : List[str] = last_checkpoint
lowercase__ : Union[str, Any] = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE_ )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowercase__ : Tuple = trainer.evaluate()
trainer.log_metrics('eval' , SCREAMING_SNAKE_CASE_ )
trainer.save_metrics('eval' , SCREAMING_SNAKE_CASE_ )
# Write model card and (optionally) push to hub
    kwargs = {
        'finetuned_from': model_args.model_name_or_path,
        'tasks': 'image-classification',
        'dataset': data_args.dataset_name,
        'tags': ['image-classification', 'vision'],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
| 366 |
from __future__ import annotations
import math
def is_prime( number : int ):
    '''Return True if number is prime, using 6k +/- 1 trial division.'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
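# Added note (worked example): for number = 97, sqrt(97) is about 9.8, so the
# 6k +/- 1 loop above only tests i = 5 (and i + 2 = 7) before returning True.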
def list_truncated_nums( number : int ):
    '''Return number together with every left- and right-truncation of it.'''
    str_num = str(number )
    list_nums = [number]
    for i in range(1 , len(str_num ) ):
        list_nums.append(int(str_num[i:] ) )
        list_nums.append(int(str_num[:-i] ) )
    return list_nums
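# Added note (worked example): for 3797 this returns
# [3797, 797, 379, 97, 37, 7, 3] -- the number plus every truncation from the
# left and from the right; all of them are prime, so 3797 is truncatable.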
def validate( number : int ):
    '''Cheap pre-filter: a long truncatable prime must begin and end in a 3-digit prime.'''
    if len(str(number ) ) > 3:
        if not is_prime(int(str(number )[-3:] ) ) or not is_prime(int(str(number )[:3] ) ):
            return False
    return True
def compute_truncated_primes( count : int = 11 ):
    '''Collect the first `count` primes that stay prime under truncation.'''
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes ) != count:
        if validate(num ):
            list_nums = list_truncated_nums(num )
            if all(is_prime(i ) for i in list_nums ):
                list_truncated_primes.append(num )
        num += 2
    return list_truncated_primes
def solution():
    '''Return the sum of the only eleven truncatable primes.'''
    return sum(compute_truncated_primes(11 ) )
if __name__ == "__main__":
print(F'''{sum(compute_truncated_primes(11)) = }''')
| 216 | 0 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUs (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders( accelerator , batch_size = 16 ):
    '''Build train and eval dataloaders for GLUE MRPC.'''
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""")
    datasets = load_dataset("""glue""" , """mrpc""")
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""" , """labels""")
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding="""longest""" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="""pt""" , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
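# Added note (illustrative): with mixed precision enabled and a longest sequence
# of, say, 45 tokens, pad_to_multiple_of=8 in collate_fn pads the batch to
# length 48, which keeps the tensor shapes friendly for fp16 Tensor Core kernels.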
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function( config , args ):
    '''Train bert-base-cased on GLUE MRPC, optionally logging to experiment trackers.'''
    if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , None) == "1":
        config["""num_epochs"""] = 2
    # Initialize Accelerator
    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: if using a custom `Tracker` class, it should be passed in here, e.g.:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="""all""" , project_dir=args.project_dir)
    else:
        accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""])
    seed = int(config["""seed"""])
    batch_size = int(config["""batch_size"""])
    set_seed(seed)
    train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size)
    metric = evaluate.load("""glue""" , """mrpc""")
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
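    # Added note (hypothetical example): if the config asked for batch_size = 64,
    # the lines above would set gradient_accumulation_steps to 64 // 16 = 4 and
    # the per-step batch size to MAX_GPU_BATCH_SIZE = 16, keeping the effective
    # batch size at 64; with the default batch_size = 16 no accumulation occurs.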
    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler)
    # New Code #
    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(""".""")[0]
        accelerator.init_trackers(run , config)
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions , references = accelerator.gather_for_metrics((predictions, batch["""labels"""]))
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'''epoch {epoch}:''' , eval_metric)
        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    """accuracy""": eval_metric["""accuracy"""],
                    """f1""": eval_metric["""f1"""],
                    """train_loss""": total_loss.item() / len(train_dataloader),
                    """epoch""": epoch,
                } , step=epoch , )
    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
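# Added note (illustrative, values made up): with `--with_tracking`, each epoch
# logs a dict like {"accuracy": 0.84, "f1": 0.89, "train_loss": 0.41, "epoch": 0}
# to every tracker found in the environment (TensorBoard, W&B, ...).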
def main():
    '''Parse CLI arguments and launch training.'''
    parser = argparse.ArgumentParser(description="""Simple example of training script.""")
    parser.add_argument(
        """--mixed_precision""" , type=str , default=None , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose """
        """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 """
        """and an Nvidia Ampere GPU.""" , )
    parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""")
    parser.add_argument(
        """--with_tracking""" , action="""store_true""" , help="""Whether to load in all available experiment trackers from the environment and use them for logging.""" , )
    parser.add_argument(
        """--project_dir""" , type=str , default="""logs""" , help="""Location where to store experiment tracking logs and relevant project information.""" , )
    args = parser.parse_args()
    config = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
    training_function(config , args)
if __name__ == "__main__":
main()
| 248 |
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
__snake_case : Optional[int] = """sshleifer/mar_enro_6_3_student"""
class A__(TestCasePlus ):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Tuple:
super().setUp()
a_ : Union[str, Any] = cached_path(
"""https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz""" , extract_compressed_file=_lowercase , )
a_ : Union[str, Any] = F'''{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'''
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Tuple:
MarianMTModel.from_pretrained(_lowercase )
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> int:
a_ : Any = {
"""$MAX_LEN""": 64,
"""$BS""": 64,
"""$GAS""": 1,
"""$ENRO_DIR""": self.data_dir,
"""facebook/mbart-large-cc25""": MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
"""--learning_rate=3e-5""": """--learning_rate 3e-4""",
"""--num_train_epochs 6""": """--num_train_epochs 1""",
}
# Clean up bash script
a_ : List[str] = (self.test_file_dir / """train_mbart_cc25_enro.sh""").open().read().split("""finetune.py""" )[1].strip()
a_ : Dict = bash_script.replace("""\\\n""" , """""" ).strip().replace("""\"$@\"""" , """""" )
for k, v in env_vars_to_replace.items():
a_ : Optional[int] = bash_script.replace(_lowercase , str(_lowercase ) )
a_ : int = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
a_ : Dict = F'''
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
'''.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
a_ : Union[str, Any] = ["""finetune.py"""] + bash_script.split() + args
with patch.object(_lowercase , """argv""" , _lowercase ):
a_ : Optional[Any] = argparse.ArgumentParser()
a_ : Tuple = pl.Trainer.add_argparse_args(_lowercase )
a_ : Any = SummarizationModule.add_model_specific_args(_lowercase , os.getcwd() )
a_ : str = parser.parse_args()
a_ : Union[str, Any] = main(_lowercase )
# Check metrics
a_ : Any = load_json(model.metrics_save_path )
a_ : List[Any] = metrics["""val"""][0]
a_ : Union[str, Any] = metrics["""val"""][-1]
self.assertEqual(len(metrics["""val"""] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , _lowercase )
self.assertGreater(last_step_stats["""val_avg_gen_time"""] , 0.0_1 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats["""val_avg_gen_time"""] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats["""val_avg_bleu"""] - first_step_stats["""val_avg_bleu"""] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats["""val_avg_bleu"""] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics["""val"""][-1]["""val_avg_bleu"""] - metrics["""test"""][-1]["""test_avg_bleu"""] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
a_ : Optional[Any] = os.listdir(_lowercase )
a_ : Dict = [x for x in contents if x.endswith(""".ckpt""" )][0]
a_ : str = os.path.join(args.output_dir , _lowercase )
a_ : Any = torch.load(_lowercase , map_location="""cpu""" )
a_ : Union[str, Any] = """model.model.decoder.layers.0.encoder_attn_layer_norm.weight"""
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
a_ : List[Any] = {os.path.basename(_lowercase ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["""test"""] ) == 1
class A__(TestCasePlus ):
"""simple docstring"""
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Optional[Any]:
a_ : Tuple = F'''{self.test_file_dir_str}/test_data/wmt_en_ro'''
a_ : str = {
"""--fp16_opt_level=O1""": """""",
"""$MAX_LEN""": 128,
"""$BS""": 16,
"""$GAS""": 1,
"""$ENRO_DIR""": data_dir,
"""$m""": """sshleifer/student_marian_en_ro_6_1""",
"""val_check_interval=0.25""": """val_check_interval=1.0""",
}
# Clean up bash script
a_ : Union[str, Any] = (
(self.test_file_dir / """distil_marian_no_teacher.sh""").open().read().split("""distillation.py""" )[1].strip()
)
a_ : Union[str, Any] = bash_script.replace("""\\\n""" , """""" ).strip().replace("""\"$@\"""" , """""" )
a_ : Any = bash_script.replace("""--fp16 """ , """ """ )
for k, v in env_vars_to_replace.items():
a_ : Dict = bash_script.replace(_lowercase , str(_lowercase ) )
a_ : int = self.get_auto_remove_tmp_dir()
a_ : Optional[Any] = bash_script.replace("""--fp16""" , """""" )
a_ : List[str] = 6
a_ : str = (
["""distillation.py"""]
+ bash_script.split()
+ [
F'''--output_dir={output_dir}''',
"""--gpus=1""",
"""--learning_rate=1e-3""",
F'''--num_train_epochs={epochs}''',
"""--warmup_steps=10""",
"""--val_check_interval=1.0""",
"""--do_predict""",
]
)
with patch.object(_lowercase , """argv""" , _lowercase ):
a_ : int = argparse.ArgumentParser()
a_ : Any = pl.Trainer.add_argparse_args(_lowercase )
a_ : str = SummarizationDistiller.add_model_specific_args(_lowercase , os.getcwd() )
a_ : Any = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
a_ : Dict = distill_main(_lowercase )
# Check metrics
a_ : Any = load_json(model.metrics_save_path )
a_ : int = metrics["""val"""][0]
a_ : Union[str, Any] = metrics["""val"""][-1]
assert len(metrics["""val"""] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.0_1
        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # fails if the model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , _lowercase )
# check lightning ckpt can be loaded and has a reasonable statedict
a_ : Dict = os.listdir(_lowercase )
a_ : List[Any] = [x for x in contents if x.endswith(""".ckpt""" )][0]
a_ : int = os.path.join(args.output_dir , _lowercase )
a_ : Union[str, Any] = torch.load(_lowercase , map_location="""cpu""" )
a_ : List[str] = """model.model.decoder.layers.0.encoder_attn_layer_norm.weight"""
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
a_ : List[str] = {os.path.basename(_lowercase ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["""test"""] ) == 1
| 248 | 1 |
"""simple docstring"""
import sys
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def str_eval( s : str ) -> int:
    '''Return the product of the digits in the string s.'''
    product = 1
    for digit in s:
        product *= int(digit )
    return product
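# Added note (worked example): str_eval("9989") multiplies 9 * 9 * 8 * 9 and
# returns 5832.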
def solution( n : str = N ) -> int:
    '''Find the largest product of 13 adjacent digits in the 1000-digit number N.'''
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n ) - 13:
        if int(n[cur_index] ) >= int(substr[0] ):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product , str_eval(substr ) )
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
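# Added note: the loop above slides the 13-digit window one place while each
# incoming digit is >= the digit leaving; otherwise it scores the current
# window with str_eval and restarts the window 13 digits further on.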
if __name__ == "__main__":
print(f'''{solution() = }''') | 340 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
'''umberto-commoncrawl-cased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
),
'''umberto-wikipedia-uncased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
),
}
class CamembertConfig( PretrainedConfig ):
    model_type = 'camembert'
    def __init__( self ,vocab_size=30_522 ,hidden_size=768 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=3_072 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,type_vocab_size=2 ,initializer_range=0.02 ,layer_norm_eps=1e-12 ,pad_token_id=1 ,bos_token_id=0 ,eos_token_id=2 ,position_embedding_type="absolute" ,use_cache=True ,classifier_dropout=None ,**kwargs ,):
        '''Configuration for CamemBERT models (a RoBERTa variant).'''
        super().__init__(pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig( OnnxConfig ):
    @property
    def inputs( self ):
        '''Return the ONNX input axis mapping for the configured task.'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] ) | 340 | 1 |
'''simple docstring'''
from __future__ import annotations
def find_max( nums , left , right ):
    """Return the maximum of nums[left:right + 1] using divide and conquer."""
    if len(nums ) == 0:
        raise ValueError("""find_max() arg is an empty sequence""" )
    if (
        left >= len(nums )
        or left < -len(nums )
        or right >= len(nums )
        or right < -len(nums )
    ):
        raise IndexError("""list index out of range""" )
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums , left , mid )  # find max in range[left, mid]
    right_max = find_max(nums , mid + 1 , right )  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
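# Added note (worked example): find_max([2, 9, 4, 7], 0, 3) splits at mid = 1,
# recursively returns 9 for [2, 9] and 7 for [4, 7], then returns 9.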
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 267 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Minimal stand-in so the name `Image` exists when vision extras are missing."""
        @staticmethod
        def open( *args , **kwargs ):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = MODEL_FOR_OBJECT_DETECTION_MAPPING
def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Tuple ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = ObjectDetectionPipeline(model=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = object_detector("""./tests/fixtures/tests_samples/COCO/000000039769.png""" , threshold=0.0 )
self.assertGreater(len(__SCREAMING_SNAKE_CASE ) , 0 )
for detected_object in outputs:
self.assertEqual(
__SCREAMING_SNAKE_CASE , {
"""score""": ANY(__SCREAMING_SNAKE_CASE ),
"""label""": ANY(__SCREAMING_SNAKE_CASE ),
"""box""": {"""xmin""": ANY(__SCREAMING_SNAKE_CASE ), """ymin""": ANY(__SCREAMING_SNAKE_CASE ), """xmax""": ANY(__SCREAMING_SNAKE_CASE ), """ymax""": ANY(__SCREAMING_SNAKE_CASE )},
} , )
import datasets
__SCREAMING_SNAKE_CASE = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" )
__SCREAMING_SNAKE_CASE = [
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
# RGBA
dataset[0]["""file"""],
# LA
dataset[1]["""file"""],
# L
dataset[2]["""file"""],
]
__SCREAMING_SNAKE_CASE = object_detector(__SCREAMING_SNAKE_CASE , threshold=0.0 )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , len(__SCREAMING_SNAKE_CASE ) )
for outputs in batch_outputs:
self.assertGreater(len(__SCREAMING_SNAKE_CASE ) , 0 )
for detected_object in outputs:
self.assertEqual(
__SCREAMING_SNAKE_CASE , {
"""score""": ANY(__SCREAMING_SNAKE_CASE ),
"""label""": ANY(__SCREAMING_SNAKE_CASE ),
"""box""": {"""xmin""": ANY(__SCREAMING_SNAKE_CASE ), """ymin""": ANY(__SCREAMING_SNAKE_CASE ), """xmax""": ANY(__SCREAMING_SNAKE_CASE ), """ymax""": ANY(__SCREAMING_SNAKE_CASE )},
} , )
@require_tf
@unittest.skip("""Object detection not implemented in TF""" )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
pass
@require_torch
def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """hf-internal-testing/tiny-detr-mobilenetsv3"""
__SCREAMING_SNAKE_CASE = AutoModelForObjectDetection.from_pretrained(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = AutoFeatureExtractor.from_pretrained(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = ObjectDetectionPipeline(model=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=0.0 )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
{"""score""": 0.3376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
] , )
__SCREAMING_SNAKE_CASE = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
[
{"""score""": 0.3376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
],
[
{"""score""": 0.3376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
],
] , )
@require_torch
@slow
def UpperCAmelCase__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """facebook/detr-resnet-50"""
__SCREAMING_SNAKE_CASE = AutoModelForObjectDetection.from_pretrained(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = AutoFeatureExtractor.from_pretrained(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = ObjectDetectionPipeline(model=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
{"""score""": 0.9982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
__SCREAMING_SNAKE_CASE = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
[
{"""score""": 0.9982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
[
{"""score""": 0.9982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
] , )
@require_torch
@slow
def UpperCAmelCase__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """facebook/detr-resnet-50"""
__SCREAMING_SNAKE_CASE = pipeline("""object-detection""" , model=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
{"""score""": 0.9982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
__SCREAMING_SNAKE_CASE = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
[
{"""score""": 0.9982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
[
{"""score""": 0.9982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
] , )
@require_torch
@slow
def UpperCAmelCase__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = 0.9985
__SCREAMING_SNAKE_CASE = """facebook/detr-resnet-50"""
__SCREAMING_SNAKE_CASE = pipeline("""object-detection""" , model=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=__SCREAMING_SNAKE_CASE )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
{"""score""": 0.9988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
@require_torch
@require_pytesseract
@slow
def UpperCAmelCase__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """Narsil/layoutlmv3-finetuned-funsd"""
__SCREAMING_SNAKE_CASE = 0.9993
__SCREAMING_SNAKE_CASE = pipeline("""object-detection""" , model=__SCREAMING_SNAKE_CASE , threshold=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = object_detector(
"""https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png""" )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
{"""score""": 0.9993, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}},
{"""score""": 0.9993, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}},
] , )
| 267 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a : Optional[int] = logging.get_logger(__name__)
a : Optional[Any] = {'vocab_file': 'spiece.model'}
a : Tuple = {
'vocab_file': {
'bert_for_seq_generation': (
'https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model'
),
}
}
a : Dict = {'bert_for_seq_generation': 512}
class BertGenerationTokenizer( PreTrainedTokenizer ):
    '''SentencePiece tokenizer for BertGeneration models.'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , sep_token="<::::>" , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , sep_token=sep_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self ):
        return self.sp_model.get_piece_size()
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def _tokenize( self , text: str ):
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        return self.sp_model.piece_to_id(token )
    def _convert_id_to_token( self , index ):
        token = self.sp_model.IdToPiece(index )
        return token
    def convert_tokens_to_string( self , tokens ):
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        if not os.path.isdir(save_directory ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , 'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
| 72 |
'''simple docstring'''
def simplify( current_set: list[list] ) -> list[list]:
    '''Normalize each row by its leading coefficient, then eliminate the first column.'''
    # Divide each row by magnitude of first term --> creates 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set ):
        magnitude = row[0]
        for column_index, column in enumerate(row ):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row )
            continue
        for column_index in range(len(row ) ):
            temp_row.append(first_row[column_index] - row[column_index] )
        final_set.append(temp_row )
    # Create next recursion iteration set
    if len(final_set[0] ) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0] )
            next_iteration.append(row[1::] )
        resultant = simplify(next_iteration )
        for i in range(len(next_iteration ) ):
            resultant[i].insert(0, current_first_column[i] )
        resultant.insert(0, current_first_row )
        final_set = resultant
    return final_set
def solve_simultaneous( equations: list[list] ) -> list:
    '''Solve n simultaneous linear equations given as n lists of length n + 1.'''
    if len(equations ) == 0:
        raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
    _length = len(equations ) + 1
    if any(len(item ) != _length for item in equations ):
        raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
    for row in equations:
        if any(not isinstance(column, (int, float) ) for column in row ):
            raise ValueError('''solve_simultaneous() requires lists of integers''' )
    if len(equations ) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set ):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data ):
            if 0 not in row:
                full_row = data_set.pop(row_index )
                break
        if not full_row:
            raise ValueError('''solve_simultaneous() requires at least 1 full equation''' )
        data_set.insert(0, full_row )
    useable_form = data_set.copy()
    simplified = simplify(useable_form )
    simplified = simplified[::-1]
    solutions = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0 )
                continue
            solutions.append(current_solution / row[-2] )
            continue
        temp_row = row.copy()[: len(row ) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0 )
        if len(temp_row ) == 0:
            solutions.append(0 )
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row ):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution )
    final = []
    for item in solutions:
        final.append(float(round(item, 5 ) ) )
    return final[::-1]
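# Added note (worked example): solve_simultaneous([[1, 2, 3], [4, 5, 6]])
# encodes x + 2y = 3 and 4x + 5y = 6 and returns [-1.0, 2.0].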
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 72 | 1 |
'''simple docstring'''
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
snake_case_ : List[Any] = logging.get_logger(__name__)
class lowercase__ ( enum.Enum ):
lowercase__ = 0
lowercase__ = 1
@add_end_docstrings(__A )
class lowercase__ ( __A ):
lowercase__ = 'generated'
def __init__( self : Union[str, Any] ,*lowerCamelCase__ : Tuple ,**lowerCamelCase__ : List[Any] ):
'''simple docstring'''
super().__init__(*__lowercase ,**__lowercase )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == 'tf'
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def UpperCamelCase_ ( self : Union[str, Any] ,lowerCamelCase__ : List[str]=None ,lowerCamelCase__ : List[Any]=None ,lowerCamelCase__ : Any=None ,lowerCamelCase__ : Union[str, Any]=None ,lowerCamelCase__ : Union[str, Any]=None ,lowerCamelCase__ : Tuple=None ,**lowerCamelCase__ : str ,):
'''simple docstring'''
_UpperCamelCase : Optional[int] = {}
if truncation is not None:
_UpperCamelCase : List[Any] = truncation
_UpperCamelCase : List[str] = generate_kwargs
_UpperCamelCase : int = {}
if return_tensors is not None and return_type is None:
_UpperCamelCase : str = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
_UpperCamelCase : Tuple = return_type
if clean_up_tokenization_spaces is not None:
_UpperCamelCase : List[str] = clean_up_tokenization_spaces
if stop_sequence is not None:
_UpperCamelCase : str = self.tokenizer.encode(__lowercase ,add_special_tokens=__lowercase )
if len(__lowercase ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
_UpperCamelCase : Any = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCamelCase_ ( self : Union[str, Any] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : List[str] ):
'''simple docstring'''
return True
def UpperCamelCase_ ( self : Optional[int] ,*lowerCamelCase__ : int ,lowerCamelCase__ : Dict ):
'''simple docstring'''
_UpperCamelCase : int = self.model.config.prefix if self.model.config.prefix is not None else ''
if isinstance(args[0] ,__lowercase ):
if self.tokenizer.pad_token_id is None:
raise ValueError('Please make sure that the tokenizer has a pad_token_id when using a batch input' )
_UpperCamelCase : Union[str, Any] = ([prefix + arg for arg in args[0]],)
_UpperCamelCase : Tuple = True
elif isinstance(args[0] ,__lowercase ):
_UpperCamelCase : int = (prefix + args[0],)
_UpperCamelCase : List[Any] = False
else:
raise ValueError(
                F' `args[0]`: {args[0]} has the wrong format. It should be either of type `str` or type `list`' )
_UpperCamelCase : List[str] = self.tokenizer(*__lowercase ,padding=__lowercase ,truncation=__lowercase ,return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self : Optional[int] ,*lowerCamelCase__ : str ,**lowerCamelCase__ : Union[str, Any] ):
'''simple docstring'''
_UpperCamelCase : Optional[Any] = super().__call__(*__lowercase ,**__lowercase )
if (
isinstance(args[0] ,__lowercase )
and all(isinstance(__lowercase ,__lowercase ) for el in args[0] )
and all(len(__lowercase ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def UpperCamelCase_ ( self : List[Any] ,lowerCamelCase__ : Dict ,lowerCamelCase__ : Union[str, Any]=TruncationStrategy.DO_NOT_TRUNCATE ,**lowerCamelCase__ : Optional[int] ):
'''simple docstring'''
_UpperCamelCase : Tuple = self._parse_and_tokenize(__lowercase ,truncation=__lowercase ,**__lowercase )
return inputs
def UpperCamelCase_ ( self : Optional[Any] ,lowerCamelCase__ : str ,**lowerCamelCase__ : Dict ):
'''simple docstring'''
if self.framework == "pt":
_UpperCamelCase , _UpperCamelCase : int = model_inputs['input_ids'].shape
elif self.framework == "tf":
_UpperCamelCase , _UpperCamelCase : Union[str, Any] = tf.shape(model_inputs['input_ids'] ).numpy()
_UpperCamelCase : Union[str, Any] = generate_kwargs.get('min_length' ,self.model.config.min_length )
_UpperCamelCase : Tuple = generate_kwargs.get('max_length' ,self.model.config.max_length )
self.check_inputs(__lowercase ,generate_kwargs['min_length'] ,generate_kwargs['max_length'] )
_UpperCamelCase : Optional[Any] = self.model.generate(**__lowercase ,**__lowercase )
_UpperCamelCase : List[str] = output_ids.shape[0]
if self.framework == "pt":
_UpperCamelCase : Tuple = output_ids.reshape(__lowercase ,out_b // in_b ,*output_ids.shape[1:] )
elif self.framework == "tf":
_UpperCamelCase : int = tf.reshape(__lowercase ,(in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def UpperCamelCase_ ( self : Union[str, Any] ,lowerCamelCase__ : Dict ,lowerCamelCase__ : Optional[int]=ReturnType.TEXT ,lowerCamelCase__ : List[Any]=False ):
'''simple docstring'''
_UpperCamelCase : str = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
_UpperCamelCase : List[str] = {F'{self.return_name}_token_ids': output_ids}
elif return_type == ReturnType.TEXT:
_UpperCamelCase : Optional[int] = {
F'{self.return_name}_text': self.tokenizer.decode(
__lowercase ,skip_special_tokens=__lowercase ,clean_up_tokenization_spaces=__lowercase ,)
}
records.append(__lowercase )
return records
@add_end_docstrings(__A )
class lowercase__ ( __A ):
lowercase__ = 'summary'
def __call__( self : str ,*lowerCamelCase__ : Optional[int] ,**lowerCamelCase__ : List[str] ):
'''simple docstring'''
return super().__call__(*__lowercase ,**__lowercase )
def UpperCamelCase_ ( self : List[str] ,lowerCamelCase__ : Any ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : List[Any] ):
'''simple docstring'''
if max_length < min_length:
            logger.warning(F'Your min_length={min_length} must be smaller than your max_length={max_length}.' )
if input_length < max_length:
logger.warning(
F'Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '
'a summarization task, where outputs shorter than the input are typically wanted, you might '
F'consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})' )
@add_end_docstrings(__A )
class lowercase__ ( __A ):
lowercase__ = 'translation'
def UpperCamelCase_ ( self : List[Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : List[Any] ):
'''simple docstring'''
if input_length > 0.9 * max_length:
logger.warning(
F'Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '
'increasing your max_length manually, e.g. translator(\'...\', max_length=400)' )
return True
def UpperCamelCase_ ( self : List[Any] ,*lowerCamelCase__ : int ,lowerCamelCase__ : Optional[Any]=TruncationStrategy.DO_NOT_TRUNCATE ,lowerCamelCase__ : Tuple=None ,lowerCamelCase__ : Any=None ):
'''simple docstring'''
if getattr(self.tokenizer ,'_build_translation_inputs' ,__lowercase ):
return self.tokenizer._build_translation_inputs(
*__lowercase ,return_tensors=self.framework ,truncation=__lowercase ,src_lang=__lowercase ,tgt_lang=__lowercase )
else:
return super()._parse_and_tokenize(*__lowercase ,truncation=__lowercase )
def UpperCamelCase_ ( self : int ,lowerCamelCase__ : Any=None ,lowerCamelCase__ : Union[str, Any]=None ,**lowerCamelCase__ : Tuple ):
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Union[str, Any] = super()._sanitize_parameters(**__lowercase )
if src_lang is not None:
_UpperCamelCase : str = src_lang
if tgt_lang is not None:
_UpperCamelCase : List[str] = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
_UpperCamelCase : Optional[int] = kwargs.get('task' ,self.task )
_UpperCamelCase : Tuple = task.split('_' )
if task and len(__lowercase ) == 4:
# translation, XX, to YY
_UpperCamelCase : Any = items[1]
_UpperCamelCase : int = items[3]
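        # Added note (worked example): a task string like "translation_en_to_fr"
        # splits into ["translation", "en", "to", "fr"], so items[1] ("en")
        # becomes src_lang and items[3] ("fr") becomes tgt_lang.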
return preprocess_params, forward_params, postprocess_params
def __call__( self : List[str] ,*lowerCamelCase__ : List[Any] ,**lowerCamelCase__ : Union[str, Any] ):
'''simple docstring'''
return super().__call__(*__lowercase ,**__lowercase )
| 83 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8
def decimal_to_bits( x , bits = BITS ):
    '''Convert an image tensor in [0, 1] to bit-plane representation in {-1, 1}.'''
    device = x.device
    x = (x * 255).int().clamp(0, 255)
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, '''d -> d 1 1''')
    x = rearrange(x, '''b c h w -> b c 1 h w''')
    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, '''b c d h w -> b (c d) h w''')
    bits = bits * 2 - 1
    return bits
def bits_to_decimal( x , bits = BITS ):
    '''Convert a bit-plane tensor in {-1, 1} back to an image tensor in [0, 1].'''
    device = x.device
    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)
    mask = rearrange(mask, '''d -> d 1 1''')
    x = rearrange(x, '''b (c d) h w -> b c d h w''', d=8)
    dec = reduce(x * mask, '''b c d h w -> b c h w''', '''sum''')
    return (dec / 255).clamp(0.0, 1.0)
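# Added note (round-trip): decimal_to_bits maps each of an image's channels to
# 8 bit-planes valued in {-1, +1} (so 3 channels become 24); bits_to_decimal
# inverts this, recovering pixel values up to 1/255 quantization error.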
def ddim_bit_scheduler_step( self, model_output, timestep, sample, eta = 0.0, use_clipped_model_output = True, generator=None, return_dict = True, ):
    '''DDIM step that clips "predicted x_0" to +/- bit_scale instead of +/- 1.'''
    if self.num_inference_steps is None:
        raise ValueError(
            '''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''')
    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"
    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps
    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t
    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)
    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5
    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else '''cpu'''
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise
        prev_sample = prev_sample + variance
    if not return_dict:
        return (prev_sample,)
    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def ddpm_bit_scheduler_step( self, model_output, timestep, sample, prediction_type="epsilon", generator=None, return_dict = True, ):
    '''DDPM step that clips "predicted x_0" to +/- bit_scale instead of +/- 1.'''
    t = timestep
    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None
    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev
    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(F"""Unsupported prediction_type {prediction_type}.""")
    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)
    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise
    pred_prev_sample = pred_prev_sample + variance
    if not return_dict:
        return (pred_prev_sample,)
    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
class BitDiffusion( DiffusionPipeline ):
    """Diffusion pipeline that denoises images encoded as {-1, +1} bit-planes."""
    def __init__(self , unet , scheduler , bit_scale = 1.0 , ):
        super().__init__()
        self.bit_scale = bit_scale
        scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler , DDIMScheduler ) else ddpm_bit_scheduler_step
        )
        self.register_modules(unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__(self , height = 256 , width = 256 , num_inference_steps = 50 , generator = None , batch_size = 1 , output_type = "pil" , return_dict = True , **kwargs , ):
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width) , generator=generator , )
        latents = decimal_to_bits(latents ) * self.bit_scale
        latents = latents.to(self.device )
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # predict the noise residual
            noise_pred = self.unet(latents , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents ).prev_sample
        image = bits_to_decimal(latents )
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
| 174 | 0 |
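The pipeline above multiplies `decimal_to_bits(...)` by `bit_scale` and decodes with `bits_to_decimal`. Below is a minimal sketch of that "analog bits" encoding, assuming 8 bits per value; the real helpers also fold the bit axis into the channel dimension, so these are illustrative stand-ins rather than the pipeline's exact functions.

import torch

BITS = 8  # assumed bits per value (sketch, not the pipeline's constant)

def decimal_to_bits(x: torch.Tensor, bits: int = BITS) -> torch.Tensor:
    # Map integer values in [0, 2**bits - 1] to analog bits in {-1, 1};
    # the trailing dimension holds the bits.
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=x.device)
    bits_tensor = ((x.long().unsqueeze(-1) & mask) != 0).float()
    return bits_tensor * 2.0 - 1.0

def bits_to_decimal(x: torch.Tensor, bits: int = BITS) -> torch.Tensor:
    # Threshold analog bits at 0 and reassemble the integer value.
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=x.device)
    return ((x > 0).long() * mask).sum(-1)

# Round trip: 5 -> [-1,-1,-1,-1,-1,1,-1,1] -> 5
assert bits_to_decimal(decimal_to_bits(torch.tensor([5]))).item() == 5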
"""simple docstring"""
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = checkpoint
UpperCAmelCase_ = {}
UpperCAmelCase_ = vae_state_dict["encoder.conv_in.weight"]
UpperCAmelCase_ = vae_state_dict["encoder.conv_in.bias"]
UpperCAmelCase_ = vae_state_dict["encoder.conv_out.weight"]
UpperCAmelCase_ = vae_state_dict["encoder.conv_out.bias"]
UpperCAmelCase_ = vae_state_dict["encoder.norm_out.weight"]
UpperCAmelCase_ = vae_state_dict["encoder.norm_out.bias"]
UpperCAmelCase_ = vae_state_dict["decoder.conv_in.weight"]
UpperCAmelCase_ = vae_state_dict["decoder.conv_in.bias"]
UpperCAmelCase_ = vae_state_dict["decoder.conv_out.weight"]
UpperCAmelCase_ = vae_state_dict["decoder.conv_out.bias"]
UpperCAmelCase_ = vae_state_dict["decoder.norm_out.weight"]
UpperCAmelCase_ = vae_state_dict["decoder.norm_out.bias"]
UpperCAmelCase_ = vae_state_dict["quant_conv.weight"]
UpperCAmelCase_ = vae_state_dict["quant_conv.bias"]
UpperCAmelCase_ = vae_state_dict["post_quant_conv.weight"]
UpperCAmelCase_ = vae_state_dict["post_quant_conv.bias"]
# Retrieves the keys for the encoder down blocks only
UpperCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} )
UpperCAmelCase_ = {
layer_id: [key for key in vae_state_dict if f"""down.{layer_id}""" in key] for layer_id in range(lowerCAmelCase__ )
}
# Retrieves the keys for the decoder up blocks only
UpperCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} )
UpperCAmelCase_ = {
layer_id: [key for key in vae_state_dict if f"""up.{layer_id}""" in key] for layer_id in range(lowerCAmelCase__ )
}
for i in range(lowerCAmelCase__ ):
UpperCAmelCase_ = [key for key in down_blocks[i] if f"""down.{i}""" in key and f"""down.{i}.downsample""" not in key]
if f"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict:
UpperCAmelCase_ = vae_state_dict.pop(
f"""encoder.down.{i}.downsample.conv.weight""" )
UpperCAmelCase_ = vae_state_dict.pop(
f"""encoder.down.{i}.downsample.conv.bias""" )
UpperCAmelCase_ = renew_vae_resnet_paths(lowerCAmelCase__ )
UpperCAmelCase_ = {"old": f"""down.{i}.block""", "new": f"""down_blocks.{i}.resnets"""}
assign_to_checkpoint(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , additional_replacements=[meta_path] , config=lowerCAmelCase__ )
UpperCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.block" in key]
UpperCAmelCase_ = 2
for i in range(1 , num_mid_res_blocks + 1 ):
UpperCAmelCase_ = [key for key in mid_resnets if f"""encoder.mid.block_{i}""" in key]
UpperCAmelCase_ = renew_vae_resnet_paths(lowerCAmelCase__ )
UpperCAmelCase_ = {"old": f"""mid.block_{i}""", "new": f"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , additional_replacements=[meta_path] , config=lowerCAmelCase__ )
UpperCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.attn" in key]
UpperCAmelCase_ = renew_vae_attention_paths(lowerCAmelCase__ )
UpperCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , additional_replacements=[meta_path] , config=lowerCAmelCase__ )
conv_attn_to_linear(lowerCAmelCase__ )
for i in range(lowerCAmelCase__ ):
UpperCAmelCase_ = num_up_blocks - 1 - i
UpperCAmelCase_ = [
key for key in up_blocks[block_id] if f"""up.{block_id}""" in key and f"""up.{block_id}.upsample""" not in key
]
if f"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict:
UpperCAmelCase_ = vae_state_dict[
f"""decoder.up.{block_id}.upsample.conv.weight"""
]
UpperCAmelCase_ = vae_state_dict[
f"""decoder.up.{block_id}.upsample.conv.bias"""
]
UpperCAmelCase_ = renew_vae_resnet_paths(lowerCAmelCase__ )
UpperCAmelCase_ = {"old": f"""up.{block_id}.block""", "new": f"""up_blocks.{i}.resnets"""}
assign_to_checkpoint(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , additional_replacements=[meta_path] , config=lowerCAmelCase__ )
UpperCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.block" in key]
UpperCAmelCase_ = 2
for i in range(1 , num_mid_res_blocks + 1 ):
UpperCAmelCase_ = [key for key in mid_resnets if f"""decoder.mid.block_{i}""" in key]
UpperCAmelCase_ = renew_vae_resnet_paths(lowerCAmelCase__ )
UpperCAmelCase_ = {"old": f"""mid.block_{i}""", "new": f"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , additional_replacements=[meta_path] , config=lowerCAmelCase__ )
UpperCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.attn" in key]
UpperCAmelCase_ = renew_vae_attention_paths(lowerCAmelCase__ )
UpperCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , additional_replacements=[meta_path] , config=lowerCAmelCase__ )
conv_attn_to_linear(lowerCAmelCase__ )
return new_checkpoint
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , ):
    # Only supports V1
    UpperCAmelCase_ = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" )
UpperCAmelCase_ = io.BytesIO(r.content )
UpperCAmelCase_ = OmegaConf.load(lowerCAmelCase__ )
UpperCAmelCase_ = 512
UpperCAmelCase_ = "cuda" if torch.cuda.is_available() else "cpu"
if checkpoint_path.endswith("safetensors" ):
from safetensors import safe_open
UpperCAmelCase_ = {}
with safe_open(lowerCAmelCase__ , framework="pt" , device="cpu" ) as f:
for key in f.keys():
UpperCAmelCase_ = f.get_tensor(lowerCAmelCase__ )
else:
UpperCAmelCase_ = torch.load(lowerCAmelCase__ , map_location=lowerCAmelCase__ )["state_dict"]
# Convert the VAE model.
UpperCAmelCase_ = create_vae_diffusers_config(lowerCAmelCase__ , image_size=lowerCAmelCase__ )
UpperCAmelCase_ = custom_convert_ldm_vae_checkpoint(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase_ = AutoencoderKL(**lowerCAmelCase__ )
vae.load_state_dict(lowerCAmelCase__ )
vae.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
parser.add_argument("""--vae_pt_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
lowerCamelCase = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 241 |
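The conversion above is, at its core, systematic state-dict key renaming plus a few tensor tweaks (e.g. `conv_attn_to_linear`). A stripped-down sketch of that renaming pattern, with a made-up two-entry mapping rather than the full VAE table:

import torch

def rename_state_dict_keys(state_dict, replacements):
    # Apply ordered (old, new) substring replacements to every key.
    renamed = {}
    for key, value in state_dict.items():
        for old, new in replacements:
            key = key.replace(old, new)
        renamed[key] = value
    return renamed

# Hypothetical example: one LDM-style key mapped onto a diffusers-style key.
sd = {"encoder.down.0.block.1.conv1.weight": torch.zeros(1)}
out = rename_state_dict_keys(sd, [("down.0.block", "down_blocks.0.resnets")])
assert "encoder.down_blocks.0.resnets.1.conv1.weight" in out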
"""simple docstring"""
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
lowerCamelCase = datasets.utils.logging.get_logger(__name__)
@dataclass
class lowercase__ ( datasets.BuilderConfig ):
'''simple docstring'''
UpperCamelCase = None
UpperCamelCase = "utf-8"
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = True # deprecated
UpperCamelCase = None # deprecated
UpperCamelCase = 10 << 20 # 10MB
UpperCamelCase = None
class lowercase__ ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
UpperCamelCase = JsonConfig
def lowercase__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
if self.config.block_size is not None:
logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead" )
UpperCAmelCase_ = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
"The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore." )
if self.config.newlines_in_values is not None:
raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported" )
return datasets.DatasetInfo(features=self.config.features )
def lowercase__ ( self : Tuple , _UpperCAmelCase : Optional[int] ) -> Optional[int]:
'''simple docstring'''
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
UpperCAmelCase_ = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_UpperCAmelCase , (str, list, tuple) ):
UpperCAmelCase_ = data_files
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
UpperCAmelCase_ = [files]
UpperCAmelCase_ = [dl_manager.iter_files(_UpperCAmelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
UpperCAmelCase_ = []
for split_name, files in data_files.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
UpperCAmelCase_ = [files]
UpperCAmelCase_ = [dl_manager.iter_files(_UpperCAmelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=_UpperCAmelCase , gen_kwargs={"files": files} ) )
return splits
def lowercase__ ( self : str , _UpperCAmelCase : pa.Table ) -> pa.Table:
'''simple docstring'''
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
UpperCAmelCase_ = self.config.features.arrow_schema.field(_UpperCAmelCase ).type
UpperCAmelCase_ = pa_table.append_column(_UpperCAmelCase , pa.array([None] * len(_UpperCAmelCase ) , type=_UpperCAmelCase ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
UpperCAmelCase_ = table_cast(_UpperCAmelCase , self.config.features.arrow_schema )
return pa_table
def lowercase__ ( self : Optional[int] , _UpperCAmelCase : List[Any] ) -> str:
'''simple docstring'''
for file_idx, file in enumerate(itertools.chain.from_iterable(_UpperCAmelCase ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(_UpperCAmelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
UpperCAmelCase_ = json.load(_UpperCAmelCase )
# We keep only the field we are interested in
UpperCAmelCase_ = dataset[self.config.field]
                # We accept two formats: a list of dicts or a dict of lists
if isinstance(_UpperCAmelCase , (list, tuple) ):
UpperCAmelCase_ = set().union(*[row.keys() for row in dataset] )
UpperCAmelCase_ = {col: [row.get(_UpperCAmelCase ) for row in dataset] for col in keys}
else:
UpperCAmelCase_ = dataset
UpperCAmelCase_ = pa.Table.from_pydict(_UpperCAmelCase )
yield file_idx, self._cast_table(_UpperCAmelCase )
# If the file has one json object per line
else:
with open(_UpperCAmelCase , "rb" ) as f:
UpperCAmelCase_ = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
UpperCAmelCase_ = max(self.config.chunksize // 32 , 16 << 10 )
UpperCAmelCase_ = (
self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
)
while True:
UpperCAmelCase_ = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(_UpperCAmelCase )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
UpperCAmelCase_ = batch.decode(self.config.encoding , errors=_UpperCAmelCase ).encode("utf-8" )
try:
while True:
try:
UpperCAmelCase_ = paj.read_json(
io.BytesIO(_UpperCAmelCase ) , read_options=paj.ReadOptions(block_size=_UpperCAmelCase ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(_UpperCAmelCase , pa.ArrowInvalid )
and "straddling" not in str(_UpperCAmelCase )
or block_size > len(_UpperCAmelCase )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F"""Batch of {len(_UpperCAmelCase )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
_UpperCAmelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
UpperCAmelCase_ = json.load(_UpperCAmelCase )
except json.JSONDecodeError:
logger.error(F"""Failed to read file '{file}' with error {type(_UpperCAmelCase )}: {e}""" )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(_UpperCAmelCase , _UpperCAmelCase ): # list is the only sequence type supported in JSON
try:
UpperCAmelCase_ = set().union(*[row.keys() for row in dataset] )
UpperCAmelCase_ = {col: [row.get(_UpperCAmelCase ) for row in dataset] for col in keys}
UpperCAmelCase_ = pa.Table.from_pydict(_UpperCAmelCase )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F"""Failed to read file '{file}' with error {type(_UpperCAmelCase )}: {e}""" )
raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None
yield file_idx, self._cast_table(_UpperCAmelCase )
break
else:
logger.error(F"""Failed to read file '{file}' with error {type(_UpperCAmelCase )}: {e}""" )
raise ValueError(
F"""Not able to read records in the JSON file at {file}. """
F"""You should probably indicate the field of the JSON file containing your records. """
F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """
F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(_UpperCAmelCase )
batch_idx += 1
| 241 | 1 |
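The builder above parses each chunk with pyarrow and, when a JSON object straddles a parse-block boundary, doubles the block size and retries. That retry loop in isolation — a sketch assuming pyarrow is installed, not the builder's full error handling:

import io

import pyarrow as pa
import pyarrow.json as paj

def parse_json_chunk(batch: bytes, block_size: int) -> pa.Table:
    while True:
        try:
            return paj.read_json(
                io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
            )
        except pa.ArrowInvalid as e:
            if "straddling" not in str(e) or block_size > len(batch):
                raise  # a real parse error, not a block-boundary artifact
            block_size *= 2  # an object crossed a block boundary; retry larger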
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCamelCase ( snake_case_ ):
    def __init__( self : List[str] , UpperCAmelCase__ : Transformer2DModel , UpperCAmelCase__ : AutoencoderKL , UpperCAmelCase__ : KarrasDiffusionSchedulers , UpperCAmelCase__ : Optional[Dict[int, str]] = None , ) -> Optional[Any]:
super().__init__()
self.register_modules(transformer=UpperCAmelCase__ , vae=UpperCAmelCase__ , scheduler=UpperCAmelCase__ )
        # create an ImageNet label -> id dictionary for easier use
        _a : str = {}
        if id2label is not None:
            for key, value in id2label.items():
for label in value.split(""",""" ):
_a : List[Any] = int(UpperCAmelCase__ )
_a : Union[str, Any] = dict(sorted(self.labels.items() ) )
def _lowercase ( self : str , UpperCAmelCase__ : Union[str, List[str]] ) -> List[int]:
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
_a : Any = list(UpperCAmelCase__ )
for l in label:
if l not in self.labels:
raise ValueError(
f"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.""" )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self : Tuple , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : float = 4.0 , UpperCAmelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase__ : int = 50 , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
_a : int = len(UpperCAmelCase__ )
_a : Dict = self.transformer.config.sample_size
_a : Optional[int] = self.transformer.config.in_channels
_a : Dict = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=UpperCAmelCase__ , device=self.device , dtype=self.transformer.dtype , )
_a : Any = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
_a : Any = torch.tensor(UpperCAmelCase__ , device=self.device ).reshape(-1 )
_a : Optional[int] = torch.tensor([1000] * batch_size , device=self.device )
_a : List[Any] = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(UpperCAmelCase__ )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
_a : Optional[Any] = latent_model_input[: len(UpperCAmelCase__ ) // 2]
_a : List[str] = torch.cat([half, half] , dim=0 )
_a : Any = self.scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ )
_a : List[str] = t
if not torch.is_tensor(UpperCAmelCase__ ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
_a : Any = latent_model_input.device.type == """mps"""
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
                    _a : Tuple = torch.float32 if is_mps else torch.float64
                else:
                    _a : List[Any] = torch.int32 if is_mps else torch.int64
_a : Optional[int] = torch.tensor([timesteps] , dtype=UpperCAmelCase__ , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
_a : Dict = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_a : Union[str, Any] = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
_a : Union[str, Any] = self.transformer(
UpperCAmelCase__ , timestep=UpperCAmelCase__ , class_labels=UpperCAmelCase__ ).sample
# perform guidance
if guidance_scale > 1:
_a , _a : Union[str, Any] = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
_a , _a : List[Any] = torch.split(UpperCAmelCase__ , len(UpperCAmelCase__ ) // 2 , dim=0 )
_a : Optional[int] = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
_a : str = torch.cat([half_eps, half_eps] , dim=0 )
_a : Dict = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
_a , _a : List[str] = torch.split(UpperCAmelCase__ , UpperCAmelCase__ , dim=1 )
else:
_a : List[str] = noise_pred
# compute previous image: x_t -> x_t-1
_a : Any = self.scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).prev_sample
if guidance_scale > 1:
_a , _a : Optional[Any] = latent_model_input.chunk(2 , dim=0 )
else:
_a : Any = latent_model_input
_a : int = 1 / self.vae.config.scaling_factor * latents
_a : Union[str, Any] = self.vae.decode(UpperCAmelCase__ ).sample
_a : str = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
_a : Optional[Any] = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_a : List[Any] = self.numpy_to_pil(UpperCAmelCase__ )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=UpperCAmelCase__ )
| 294 |
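Classifier-free guidance, as used in the denoising loop above, in isolation: the batch holds a conditional half and a null-label half, and their noise predictions are recombined with the guidance scale. A sketch with illustrative shapes, mirroring the split/cat logic in the loop:

import torch

def apply_guidance(noise_pred: torch.Tensor, latent_channels: int, guidance_scale: float) -> torch.Tensor:
    # Split off the learned-sigma channels, then mix cond/uncond epsilon.
    eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
    cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
    half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
    return torch.cat([torch.cat([half_eps, half_eps], dim=0), rest], dim=1)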
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}
class UpperCamelCase ( snake_case_ ):
UpperCamelCase : int = '''mvp'''
UpperCamelCase : Union[str, Any] = ['''past_key_values''']
UpperCamelCase : Any = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : List[str] , UpperCAmelCase__ : List[str]=50267 , UpperCAmelCase__ : Optional[Any]=1024 , UpperCAmelCase__ : Tuple=12 , UpperCAmelCase__ : Optional[Any]=4096 , UpperCAmelCase__ : int=16 , UpperCAmelCase__ : Tuple=12 , UpperCAmelCase__ : int=4096 , UpperCAmelCase__ : List[Any]=16 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Tuple="gelu" , UpperCAmelCase__ : Union[str, Any]=1024 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Any=0.0 , UpperCAmelCase__ : Dict=0.0 , UpperCAmelCase__ : Tuple=0.0_2 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Optional[Any]=False , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Tuple=1 , UpperCAmelCase__ : Dict=0 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Tuple=2 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : Optional[Any]=False , UpperCAmelCase__ : Dict=100 , UpperCAmelCase__ : Union[str, Any]=800 , **UpperCAmelCase__ : Dict , ) -> List[Any]:
_a : Any = vocab_size
_a : Any = max_position_embeddings
_a : Union[str, Any] = d_model
_a : List[str] = encoder_ffn_dim
_a : List[Any] = encoder_layers
_a : Dict = encoder_attention_heads
_a : Tuple = decoder_ffn_dim
_a : List[Any] = decoder_layers
_a : Optional[Any] = decoder_attention_heads
_a : Optional[Any] = dropout
_a : str = attention_dropout
_a : Dict = activation_dropout
_a : Any = activation_function
_a : Tuple = init_std
_a : Dict = encoder_layerdrop
_a : Optional[int] = decoder_layerdrop
_a : Optional[Any] = classifier_dropout
_a : List[Any] = use_cache
_a : Dict = encoder_layers
_a : str = scale_embedding # scale factor will be sqrt(d_model) if True
_a : int = use_prompt
_a : Dict = prompt_length
_a : Dict = prompt_mid_dim
super().__init__(
pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , is_encoder_decoder=UpperCAmelCase__ , decoder_start_token_id=UpperCAmelCase__ , forced_eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ , )
if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , UpperCAmelCase__ ):
_a : List[str] = self.bos_token_id
warnings.warn(
f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
"""The config can simply be saved and uploaded again to be fixed.""" )
| 294 | 1 |
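A brief usage sketch for a config like the one above: configs round-trip through config.json, which is also how the `forced_bos_token_id` fix-up mentioned in the warning is meant to be persisted. Parameter values here are illustrative, not recommended settings.

from transformers import MvpConfig

config = MvpConfig(d_model=512, encoder_layers=6, decoder_layers=6)
config.save_pretrained("./mvp-small")             # writes ./mvp-small/config.json
reloaded = MvpConfig.from_pretrained("./mvp-small")
assert reloaded.d_model == 512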
"""simple docstring"""
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
lowerCAmelCase_ : List[str] = parse(importlib.metadata.version('''torch'''))
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
if operation not in STR_OPERATION_TO_FUNC.keys():
raise ValueError(F'''`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}''' )
UpperCAmelCase = STR_OPERATION_TO_FUNC[operation]
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase = parse(importlib.metadata.version(lowerCAmelCase ) )
return operation(lowerCAmelCase , parse(lowerCAmelCase ) )
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
return compare_versions(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
| 248 |
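For reference, a self-contained sketch of what `STR_OPERATION_TO_FUNC` is assumed to contain and how the comparison resolves; the mapping below is an assumption for illustration, not the constants module's exact contents.

import operator

from packaging.version import parse

STR_OPERATION_TO_FUNC = {
    ">": operator.gt, ">=": operator.ge, "==": operator.eq,
    "!=": operator.ne, "<=": operator.le, "<": operator.lt,
}

def compare(version: str, operation: str, requirement: str) -> bool:
    # e.g. compare("2.0.1", ">=", "1.13") checks parse("2.0.1") >= parse("1.13")
    return STR_OPERATION_TO_FUNC[operation](parse(version), parse(requirement))

assert compare("2.0.1", ">=", "1.13")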
"""simple docstring"""
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class UpperCamelCase_ ( a_ ):
def __init__( self , *snake_case__ , snake_case__=None , snake_case__=None , **snake_case__ ) -> Optional[Any]:
"""simple docstring"""
super().__init__(*snake_case__ , **snake_case__ )
UpperCAmelCase = eval_examples
UpperCAmelCase = post_process_function
def UpperCamelCase_ ( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__ = "eval" ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = self.eval_dataset if eval_dataset is None else eval_dataset
UpperCAmelCase = self.get_eval_dataloader(snake_case__ )
UpperCAmelCase = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
UpperCAmelCase = self.compute_metrics
UpperCAmelCase = None
UpperCAmelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
UpperCAmelCase = time.time()
try:
UpperCAmelCase = eval_loop(
snake_case__ , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=snake_case__ , metric_key_prefix=snake_case__ , )
finally:
UpperCAmelCase = compute_metrics
UpperCAmelCase = self.args.eval_batch_size * self.args.world_size
if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
snake_case__ , snake_case__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node writes the results by default
UpperCAmelCase = self.post_process_function(snake_case__ , snake_case__ , output.predictions )
UpperCAmelCase = self.compute_metrics(snake_case__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'''{metric_key_prefix}_''' ):
UpperCAmelCase = metrics.pop(snake_case__ )
metrics.update(output.metrics )
else:
UpperCAmelCase = output.metrics
if self.args.should_log:
            # Only the main node logs the results by default
self.log(snake_case__ )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
UpperCAmelCase = self.callback_handler.on_evaluate(self.args , self.state , self.control , snake_case__ )
return metrics
def UpperCamelCase_ ( self , snake_case__ , snake_case__ , snake_case__=None , snake_case__ = "test" ) -> Dict:
"""simple docstring"""
UpperCAmelCase = self.get_test_dataloader(snake_case__ )
# Temporarily disable metric computation, we will do it in the loop here.
UpperCAmelCase = self.compute_metrics
UpperCAmelCase = None
UpperCAmelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
UpperCAmelCase = time.time()
try:
UpperCAmelCase = eval_loop(
snake_case__ , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=snake_case__ , metric_key_prefix=snake_case__ , )
finally:
UpperCAmelCase = compute_metrics
UpperCAmelCase = self.args.eval_batch_size * self.args.world_size
if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
snake_case__ , snake_case__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
UpperCAmelCase = self.post_process_function(snake_case__ , snake_case__ , output.predictions , """predict""" )
UpperCAmelCase = self.compute_metrics(snake_case__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'''{metric_key_prefix}_''' ):
UpperCAmelCase = metrics.pop(snake_case__ )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=snake_case__ )
| 248 | 1 |
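The trainer above assumes a `post_process_function(examples, features, predictions[, stage])` that turns raw logits into something `compute_metrics` can score. A hypothetical squad-style stub showing only that contract — the span decoding itself is omitted, and the field names are assumptions:

from transformers import EvalPrediction

def post_process_function(examples, features, predictions, stage="eval"):
    start_logits, end_logits = predictions
    # ... decode start/end logits into answer spans per example (omitted) ...
    formatted = [{"id": ex["id"], "prediction_text": ""} for ex in examples]
    references = [{"id": ex["id"], "answers": ex["answers"]} for ex in examples]
    return EvalPrediction(predictions=formatted, label_ids=references)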
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
a_ : List[str] = {"""configuration_unispeech""": ["""UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP""", """UniSpeechConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Any = [
"""UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""UniSpeechForCTC""",
"""UniSpeechForPreTraining""",
"""UniSpeechForSequenceClassification""",
"""UniSpeechModel""",
"""UniSpeechPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
a_ : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 75 |
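The `_LazyModule` indirection above defers the heavy torch imports until an attribute is first accessed. A hand-rolled minimal equivalent, for illustration only (the real class also handles module specs, dir(), and error reporting):

import importlib
import types

class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        # `name` is assumed to be a package name with importable submodules.
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # Import the defining submodule only on first access.
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)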
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
a_ : Union[str, Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : Tuple =['pixel_values']
def __init__( self, lowerCAmelCase = True, lowerCAmelCase = None, lowerCAmelCase = PILImageResampling.BICUBIC, lowerCAmelCase = True, lowerCAmelCase = None, lowerCAmelCase = True, lowerCAmelCase = 1 / 255, lowerCAmelCase = True, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = True, **lowerCAmelCase, ):
"""simple docstring"""
super().__init__(**lowerCAmelCase )
lowerCamelCase_ =size if size is not None else {'''shortest_edge''': 224}
lowerCamelCase_ =get_size_dict(lowerCAmelCase, default_to_square=lowerCAmelCase )
lowerCamelCase_ =crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
lowerCamelCase_ =get_size_dict(lowerCAmelCase, default_to_square=lowerCAmelCase, param_name='''crop_size''' )
lowerCamelCase_ =do_resize
lowerCamelCase_ =size
lowerCamelCase_ =resample
lowerCamelCase_ =do_center_crop
lowerCamelCase_ =crop_size
lowerCamelCase_ =do_rescale
lowerCamelCase_ =rescale_factor
lowerCamelCase_ =do_normalize
lowerCamelCase_ =image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowerCamelCase_ =image_std if image_std is not None else OPENAI_CLIP_STD
lowerCamelCase_ =do_convert_rgb
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = PILImageResampling.BICUBIC, lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =get_size_dict(lowerCAmelCase, default_to_square=lowerCAmelCase )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
lowerCamelCase_ =get_resize_output_image_size(lowerCAmelCase, size=size['''shortest_edge'''], default_to_square=lowerCAmelCase )
return resize(lowerCAmelCase, size=lowerCAmelCase, resample=lowerCAmelCase, data_format=lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =get_size_dict(lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(lowerCAmelCase, size=(size['''height'''], size['''width''']), data_format=lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
return rescale(lowerCAmelCase, scale=lowerCAmelCase, data_format=lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
return normalize(lowerCAmelCase, mean=lowerCAmelCase, std=lowerCAmelCase, data_format=lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = ChannelDimension.FIRST, **lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =do_resize if do_resize is not None else self.do_resize
lowerCamelCase_ =size if size is not None else self.size
lowerCamelCase_ =get_size_dict(lowerCAmelCase, param_name='''size''', default_to_square=lowerCAmelCase )
lowerCamelCase_ =resample if resample is not None else self.resample
lowerCamelCase_ =do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCamelCase_ =crop_size if crop_size is not None else self.crop_size
lowerCamelCase_ =get_size_dict(lowerCAmelCase, param_name='''crop_size''', default_to_square=lowerCAmelCase )
lowerCamelCase_ =do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase_ =rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase_ =do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase_ =image_mean if image_mean is not None else self.image_mean
lowerCamelCase_ =image_std if image_std is not None else self.image_std
lowerCamelCase_ =do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowerCamelCase_ =make_list_of_images(lowerCAmelCase )
if not valid_images(lowerCAmelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowerCamelCase_ =[convert_to_rgb(lowerCAmelCase ) for image in images]
# All transformations expect numpy arrays.
lowerCamelCase_ =[to_numpy_array(lowerCAmelCase ) for image in images]
if do_resize:
lowerCamelCase_ =[self.resize(image=lowerCAmelCase, size=lowerCAmelCase, resample=lowerCAmelCase ) for image in images]
if do_center_crop:
lowerCamelCase_ =[self.center_crop(image=lowerCAmelCase, size=lowerCAmelCase ) for image in images]
if do_rescale:
lowerCamelCase_ =[self.rescale(image=lowerCAmelCase, scale=lowerCAmelCase ) for image in images]
if do_normalize:
lowerCamelCase_ =[self.normalize(image=lowerCAmelCase, mean=lowerCAmelCase, std=lowerCAmelCase ) for image in images]
lowerCamelCase_ =[to_channel_dimension_format(lowerCAmelCase, lowerCAmelCase ) for image in images]
lowerCamelCase_ ={'''pixel_values''': images}
return BatchFeature(data=lowerCAmelCase, tensor_type=lowerCAmelCase )
| 75 | 1 |
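A quick usage sketch for a processor of this kind; the definition above appears to correspond to `CLIPImageProcessor`, and the image below is a dummy:

import numpy as np
from PIL import Image
from transformers import CLIPImageProcessor

processor = CLIPImageProcessor()  # defaults mirror the __init__ above
image = Image.fromarray(np.zeros((256, 256, 3), dtype=np.uint8))
batch = processor(images=image, return_tensors="pt")
print(batch["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])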
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase: List[Any] = {"""configuration_deit""": ["""DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DeiTConfig""", """DeiTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase: Any = ["""DeiTFeatureExtractor"""]
UpperCAmelCase: Optional[Any] = ["""DeiTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase: Union[str, Any] = [
"""DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DeiTForImageClassification""",
"""DeiTForImageClassificationWithTeacher""",
"""DeiTForMaskedImageModeling""",
"""DeiTModel""",
"""DeiTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase: Tuple = [
"""TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDeiTForImageClassification""",
"""TFDeiTForImageClassificationWithTeacher""",
"""TFDeiTForMaskedImageModeling""",
"""TFDeiTModel""",
"""TFDeiTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
UpperCAmelCase: Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 336 |
"""simple docstring"""
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 336 | 1 |
"""simple docstring"""
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
a = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE__ ( _a ):
def __lowercase ( self : List[Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : List[str]=None ):
lowerCAmelCase = self.layer[current_layer](lowerCAmelCase , lowerCAmelCase , head_mask[current_layer] )
lowerCAmelCase = layer_outputs[0]
return hidden_states
@add_start_docstrings(
'The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.' , _a , )
class SCREAMING_SNAKE_CASE__ ( _a ):
def __init__( self : Tuple , lowerCAmelCase : int ):
super().__init__(lowerCAmelCase )
lowerCAmelCase = BertEncoderWithPabee(lowerCAmelCase )
self.init_weights()
lowerCAmelCase = 0
lowerCAmelCase = 0
lowerCAmelCase = 0
lowerCAmelCase = 0
def __lowercase ( self : Any , lowerCAmelCase : Optional[Any] ):
lowerCAmelCase = threshold
def __lowercase ( self : Optional[Any] , lowerCAmelCase : Any ):
lowerCAmelCase = patience
def __lowercase ( self : Union[str, Any] ):
lowerCAmelCase = 0
lowerCAmelCase = 0
def __lowercase ( self : Any ):
lowerCAmelCase = self.inference_layers_num / self.inference_instances_num
lowerCAmelCase = (
f'''*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='''
f''' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'''
)
print(lowerCAmelCase )
@add_start_docstrings_to_model_forward(lowerCAmelCase )
def __lowercase ( self : int , lowerCAmelCase : int=None , lowerCAmelCase : str=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Tuple=None , lowerCAmelCase : List[str]=None , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Any=None , lowerCAmelCase : List[str]=False , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" )
elif input_ids is not None:
lowerCAmelCase = input_ids.size()
elif inputs_embeds is not None:
lowerCAmelCase = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""" )
lowerCAmelCase = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
lowerCAmelCase = torch.ones(lowerCAmelCase , device=lowerCAmelCase )
if token_type_ids is None:
lowerCAmelCase = torch.zeros(lowerCAmelCase , dtype=torch.long , device=lowerCAmelCase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
lowerCAmelCase = self.get_extended_attention_mask(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = encoder_hidden_states.size()
lowerCAmelCase = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
lowerCAmelCase = torch.ones(lowerCAmelCase , device=lowerCAmelCase )
lowerCAmelCase = self.invert_attention_mask(lowerCAmelCase )
else:
lowerCAmelCase = None
# Prepare head mask if needed
        # 1.0 in head_mask indicates we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
lowerCAmelCase = self.get_head_mask(lowerCAmelCase , self.config.num_hidden_layers )
lowerCAmelCase = self.embeddings(
input_ids=lowerCAmelCase , position_ids=lowerCAmelCase , token_type_ids=lowerCAmelCase , inputs_embeds=lowerCAmelCase )
lowerCAmelCase = embedding_output
if self.training:
lowerCAmelCase = []
for i in range(self.config.num_hidden_layers ):
lowerCAmelCase = self.encoder.adaptive_forward(
lowerCAmelCase , current_layer=lowerCAmelCase , attention_mask=lowerCAmelCase , head_mask=lowerCAmelCase )
lowerCAmelCase = self.pooler(lowerCAmelCase )
lowerCAmelCase = output_layers[i](output_dropout(lowerCAmelCase ) )
res.append(lowerCAmelCase )
elif self.patience == 0: # Use all layers for inference
lowerCAmelCase = self.encoder(
lowerCAmelCase , attention_mask=lowerCAmelCase , head_mask=lowerCAmelCase , encoder_hidden_states=lowerCAmelCase , encoder_attention_mask=lowerCAmelCase , )
lowerCAmelCase = self.pooler(encoder_outputs[0] )
lowerCAmelCase = [output_layers[self.config.num_hidden_layers - 1](lowerCAmelCase )]
else:
lowerCAmelCase = 0
lowerCAmelCase = None
lowerCAmelCase = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
lowerCAmelCase = self.encoder.adaptive_forward(
lowerCAmelCase , current_layer=lowerCAmelCase , attention_mask=lowerCAmelCase , head_mask=lowerCAmelCase )
lowerCAmelCase = self.pooler(lowerCAmelCase )
lowerCAmelCase = output_layers[i](lowerCAmelCase )
if regression:
lowerCAmelCase = logits.detach()
if patient_result is not None:
lowerCAmelCase = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
lowerCAmelCase = 0
else:
lowerCAmelCase = logits.detach().argmax(dim=1 )
if patient_result is not None:
lowerCAmelCase = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(lowerCAmelCase ) ):
patient_counter += 1
else:
lowerCAmelCase = 0
lowerCAmelCase = logits
if patient_counter == self.patience:
break
lowerCAmelCase = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
'Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. ' , _a , )
class SCREAMING_SNAKE_CASE__ ( _a ):
def __init__( self : Union[str, Any] , lowerCAmelCase : Any ):
super().__init__(lowerCAmelCase )
lowerCAmelCase = config.num_labels
lowerCAmelCase = BertModelWithPabee(lowerCAmelCase )
lowerCAmelCase = nn.Dropout(config.hidden_dropout_prob )
lowerCAmelCase = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(lowerCAmelCase )
def __lowercase ( self : Union[str, Any] , lowerCAmelCase : Tuple=None , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Dict=None , lowerCAmelCase : List[str]=None , lowerCAmelCase : Tuple=None , lowerCAmelCase : Optional[int]=None , ):
lowerCAmelCase = self.bert(
input_ids=lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , position_ids=lowerCAmelCase , head_mask=lowerCAmelCase , inputs_embeds=lowerCAmelCase , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
lowerCAmelCase = (logits[-1],)
if labels is not None:
lowerCAmelCase = None
lowerCAmelCase = 0
for ix, logits_item in enumerate(lowerCAmelCase ):
if self.num_labels == 1:
# We are doing regression
lowerCAmelCase = MSELoss()
lowerCAmelCase = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
lowerCAmelCase = CrossEntropyLoss()
lowerCAmelCase = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
lowerCAmelCase = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
lowerCAmelCase = (total_loss / total_weights,) + outputs
return outputs
| 155 |
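The patience rule that drives the inference path above, isolated: exit once the arg-max prediction has been identical for `patience` consecutive layers. A sketch over precomputed per-layer logits (the real model computes each layer lazily and stops early):

import torch

def pabee_exit_layer(per_layer_logits, patience: int) -> int:
    patient_counter, previous = 0, None
    for layer_idx, logits in enumerate(per_layer_logits):
        labels = logits.argmax(dim=1)
        if previous is not None and torch.all(labels.eq(previous)):
            patient_counter += 1
        else:
            patient_counter = 0
        previous = labels
        if patient_counter == patience:
            return layer_idx  # prediction stable long enough: exit early
    return len(per_layer_logits) - 1  # fall through to the last layer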
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a = {
'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
'LILT_PRETRAINED_MODEL_ARCHIVE_LIST',
'LiltForQuestionAnswering',
'LiltForSequenceClassification',
'LiltForTokenClassification',
'LiltModel',
'LiltPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 155 | 1 |
"""simple docstring"""
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
_snake_case = logging.get_logger(__name__)
class UpperCamelCase :
UpperCamelCase : Any = None
@experimental
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
if ParallelBackendConfig.backend_name is None:
return _map_with_multiprocessing_pool(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return _map_with_joblib(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : Any = num_proc if num_proc <= len(UpperCamelCase__ ) else len(UpperCamelCase__ )
    _a : Optional[Any] = []  # We organize the splits ourselves (contiguous splits)
for index in range(UpperCamelCase__ ):
_a : Optional[int] = len(UpperCamelCase__ ) // num_proc
_a : Any = len(UpperCamelCase__ ) % num_proc
_a : Tuple = div * index + min(UpperCamelCase__ , UpperCamelCase__ )
_a : Optional[int] = start + div + (1 if index < mod else 0)
split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )
if len(UpperCamelCase__ ) != sum(len(i[1] ) for i in split_kwds ):
raise ValueError(
F"""Error dividing inputs iterable among processes. """
F"""Total number of objects {len(UpperCamelCase__ )}, """
F"""length: {sum(len(i[1] ) for i in split_kwds )}""" )
logger.info(
F"""Spawning {num_proc} processes for {len(UpperCamelCase__ )} objects in slices of {[len(i[1] ) for i in split_kwds]}""" )
_a , _a : Any = None, None
if not disable_tqdm:
_a , _a : Any = (RLock(),), tqdm.set_lock
with Pool(UpperCamelCase__ , initargs=UpperCamelCase__ , initializer=UpperCamelCase__ ) as pool:
_a : Optional[Any] = pool.map(UpperCamelCase__ , UpperCamelCase__ )
logger.info(F"""Finished {num_proc} processes""" )
_a : Optional[int] = [obj for proc_res in mapped for obj in proc_res]
logger.info(F"""Unpacked {len(UpperCamelCase__ )} objects""" )
return mapped
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
# progress bar is not yet supported for _map_with_joblib, because tqdm couldn't accurately be applied to joblib,
# and it requires monkey-patching joblib internal classes which is subject to change
import joblib
with joblib.parallel_backend(ParallelBackendConfig.backend_name , n_jobs=UpperCamelCase__ ):
return joblib.Parallel()(
joblib.delayed(UpperCamelCase__ )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def lowerCAmelCase__ ( UpperCamelCase__ ):
'''simple docstring'''
_a : int = backend_name
if backend_name == "spark":
from joblibspark import register_spark
register_spark()
# TODO: call create_cache_and_write_probe if "download" in steps
# TODO: raise NotImplementedError when Dataset.map etc is called
try:
yield
finally:
_a : Optional[Any] = None
| 324 |
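The sharding arithmetic above, standalone: each of `num_proc` contiguous shards gets `n // num_proc` items, with the remainder spread one-per-shard over the first `n % num_proc` shards. A minimal sketch of that index computation:

def contiguous_shards(n: int, num_proc: int):
    div, mod = n // num_proc, n % num_proc
    for index in range(num_proc):
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        yield start, end

# 10 items over 3 processes: shard sizes 4, 3, 3, covering the range exactly.
assert list(contiguous_shards(10, 3)) == [(0, 4), (4, 7), (7, 10)]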
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_snake_case = logging.get_logger(__name__)
class UpperCamelCase ( snake_case_ ):
UpperCamelCase : Dict = ['''pixel_values''']
def __init__( self : Any , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : int = 32 , UpperCAmelCase__ : Optional[Any]=PILImageResampling.BILINEAR , UpperCAmelCase__ : bool = True , **UpperCAmelCase__ : List[str] , ) -> None:
_a : int = do_resize
_a : Union[str, Any] = do_rescale
_a : Any = size_divisor
_a : Any = resample
super().__init__(**UpperCAmelCase__ )
def _lowercase ( self : Tuple , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[ChannelDimension] = None , **UpperCAmelCase__ : Optional[Any] ) -> np.ndarray:
_a , _a : Tuple = get_image_size(UpperCAmelCase__ )
# Rounds the height and width down to the closest multiple of size_divisor
_a : Optional[Any] = height // size_divisor * size_divisor
_a : Union[str, Any] = width // size_divisor * size_divisor
_a : Any = resize(UpperCAmelCase__ , (new_h, new_w) , resample=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
return image
def _lowercase ( self : Union[str, Any] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : float , UpperCAmelCase__ : Optional[ChannelDimension] = None , **UpperCAmelCase__ : Optional[int] ) -> np.ndarray:
return rescale(image=UpperCAmelCase__ , scale=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[Union[TensorType, str]] = None , UpperCAmelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase__ : int , ) -> BatchFeature:
_a : Dict = do_resize if do_resize is not None else self.do_resize
_a : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
_a : str = size_divisor if size_divisor is not None else self.size_divisor
_a : Any = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError("""size_divisor is required for resizing""" )
_a : List[str] = make_list_of_images(UpperCAmelCase__ )
if not valid_images(UpperCAmelCase__ ):
raise ValueError("""Invalid image(s)""" )
# All transformations expect numpy arrays.
_a : Tuple = [to_numpy_array(UpperCAmelCase__ ) for img in images]
if do_resize:
_a : Optional[int] = [self.resize(UpperCAmelCase__ , size_divisor=UpperCAmelCase__ , resample=UpperCAmelCase__ ) for image in images]
if do_rescale:
_a : str = [self.rescale(UpperCAmelCase__ , scale=1 / 255 ) for image in images]
_a : Any = [to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__ ) for image in images]
_a : Optional[int] = {"""pixel_values""": images}
return BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ )
| 324 | 1 |
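The `resize` method above floors each spatial dimension to a multiple of `size_divisor` before resizing, so stride-based backbones divide the feature map evenly. The rounding in one helper, as a sketch:

def round_down_to_divisor(height: int, width: int, size_divisor: int = 32):
    # Floor each dimension to the nearest multiple of size_divisor.
    return height // size_divisor * size_divisor, width // size_divisor * size_divisor

assert round_down_to_divisor(481, 640) == (480, 640)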
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
    from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
def A_ ( self : Tuple ) -> str:
lowerCamelCase__ : int = tempfile.mkdtemp()
lowerCamelCase__ : Tuple = BlipImageProcessor()
        lowerCamelCase__ : str = GPT2Tokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model' )
        lowerCamelCase__ : List[Any] = Blip2Processor(UpperCAmelCase , UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def A_ ( self : List[str] , **UpperCAmelCase : str ) -> str:
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ).tokenizer
def A_ ( self : Optional[int] , **UpperCAmelCase : List[str] ) -> Tuple:
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ).image_processor
def A_ ( self : Optional[Any] ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
def A_ ( self : Tuple ) -> Any:
        lowerCamelCase__ : Optional[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
lowerCamelCase__ : Optional[Any] = [Image.fromarray(np.moveaxis(UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def A_ ( self : Dict ) -> List[str]:
        lowerCamelCase__ : Optional[Any] = Blip2Processor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase__ : Optional[Any] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
lowerCamelCase__ : Union[str, Any] = self.get_image_processor(do_normalize=UpperCAmelCase , padding_value=1.0 )
        lowerCamelCase__ : str = Blip2Processor.from_pretrained(
            self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase )
def A_ ( self : Tuple ) -> str:
lowerCamelCase__ : Dict = self.get_image_processor()
lowerCamelCase__ : List[Any] = self.get_tokenizer()
lowerCamelCase__ : Any = BlipaProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = self.prepare_image_inputs()
lowerCamelCase__ : List[Any] = image_processor(UpperCAmelCase , return_tensors='np' )
lowerCamelCase__ : List[Any] = processor(images=UpperCAmelCase , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def A_ ( self : int ) -> Any:
lowerCamelCase__ : Optional[Any] = self.get_image_processor()
lowerCamelCase__ : List[str] = self.get_tokenizer()
lowerCamelCase__ : int = BlipaProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = 'lower newer'
lowerCamelCase__ : Optional[int] = processor(text=UpperCAmelCase )
lowerCamelCase__ : Tuple = tokenizer(UpperCAmelCase , return_token_type_ids=UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def A_ ( self : Tuple ) -> List[Any]:
lowerCamelCase__ : Any = self.get_image_processor()
lowerCamelCase__ : Dict = self.get_tokenizer()
lowerCamelCase__ : Optional[Any] = BlipaProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = 'lower newer'
lowerCamelCase__ : Optional[Any] = self.prepare_image_inputs()
lowerCamelCase__ : Optional[Any] = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase ):
processor()
def A_ ( self : Dict ) -> Union[str, Any]:
lowerCamelCase__ : Optional[int] = self.get_image_processor()
lowerCamelCase__ : Optional[int] = self.get_tokenizer()
lowerCamelCase__ : str = BlipaProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase__ : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase__ : str = processor.batch_decode(UpperCAmelCase )
lowerCamelCase__ : str = tokenizer.batch_decode(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def A_ ( self : Tuple ) -> str:
lowerCamelCase__ : Optional[int] = self.get_image_processor()
lowerCamelCase__ : List[Any] = self.get_tokenizer()
lowerCamelCase__ : List[str] = BlipaProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase__ : Any = 'lower newer'
lowerCamelCase__ : Optional[Any] = self.prepare_image_inputs()
lowerCamelCase__ : Union[str, Any] = processor(text=UpperCAmelCase , images=UpperCAmelCase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] )
| 50 |
'''simple docstring'''
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            ) )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)
def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group
def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]
def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }
def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
| 79 | 0 |
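To see how the sharding helpers above split work across jobs, here is a self-contained sketch mirroring _distribute_shards: job i gets num_shards // max_num_jobs shards, plus one of the num_shards % max_num_jobs leftovers.

def distribute(num_shards: int, max_num_jobs: int) -> list:
    groups = []
    for group_idx in range(max_num_jobs):
        to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if to_add == 0:
            break
        start = groups[-1].stop if groups else 0
        groups.append(range(start, start + to_add))
    return groups

print(distribute(10, 3))  # [range(0, 4), range(4, 7), range(7, 10)]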
"""simple docstring"""
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
    PIL_INTERPOLATION = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def pt_to_pil(images):
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images
def numpy_to_pil(images):
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype('uint8')
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode='L') for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
| 370 |
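A quick usage sketch of numpy_to_pil above (assumes Pillow is installed and the function is in scope):

import numpy as np

# a batch of two random RGB images in [0, 1], channels-last
images = np.random.rand(2, 64, 64, 3)
pil_images = numpy_to_pil(images)
print(pil_images[0].size)  # (64, 64)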
"""simple docstring"""
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class RayRetriever:
    def __init__(self):
        self.initialized = False
    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False, )
            self.initialized = True
    def init_retrieval(self):
        self.retriever.index.init_index()
    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds
class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                'When using Ray for distributed fine-tuning, '
                "you'll need to provide the paths instead, "
                'as the dataset and the index are loaded '
                'separately. More info in examples/rag/use_own_knowledge_dataset.py ' )
        super().__init__(
            config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False, )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ] )
    def init_retrieval(self):
        logger.info('initializing retrieval')
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()
    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)
    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)
    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop('config', None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = 'custom'
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, retrieval_workers=actor_handles, index=index, )
| 205 | 0 |
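The retriever above uses the standard Ray actor pattern: wrap state in a class, convert it with ray.remote, call methods through .remote(), and gather results with ray.get. A minimal self-contained sketch of that pattern, unrelated to RAG itself:

import ray

ray.init(ignore_reinit_error=True)

@ray.remote
class Counter:
    def __init__(self):
        self.value = 0
    def increment(self):
        self.value += 1
        return self.value

workers = [Counter.remote() for _ in range(3)]
# .remote() returns futures; ray.get blocks and collects the results.
print(ray.get([w.increment.remote() for w in workers]))  # [1, 1, 1]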
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING
FlaxAutoModel = auto_class_update(FlaxAutoModel)
class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING
FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")
class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")
class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING
FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")
class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)
class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)
class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")
class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)
class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")
class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)
class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)
class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")
class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
| 101 |
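The _LazyAutoMapping used above defers importing heavy model classes until a key is actually looked up. A toy version of that idea with plain importlib (the names here are illustrative, not the transformers implementation):

import importlib

class LazyMapping:
    def __init__(self, name_by_key: dict, module_name: str):
        self._name_by_key = name_by_key
        self._module_name = module_name
        self._cache = {}
    def __getitem__(self, key):
        # Import the backing module only on first access to a given key.
        if key not in self._cache:
            module = importlib.import_module(self._module_name)
            self._cache[key] = getattr(module, self._name_by_key[key])
        return self._cache[key]

mapping = LazyMapping({"path": "PurePath"}, "pathlib")
print(mapping["path"])  # <class 'pathlib.PurePath'>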
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_mobilebert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_mobilebert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_mobilebert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
    def create_and_check_mobilebert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels, )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
    def create_and_check_mobilebert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_mobilebert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_mobilebert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_mobilebert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device)
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)
    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)
    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ], device=torch_device, )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)
        self.assertTrue(lower_bound and upper_bound)
| 216 | 0 |
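The integration test above checks large-magnitude outputs with a ratio bound rather than an absolute difference, since values near 1e8 would swamp any additive tolerance. The same idea in a few lines of torch:

import torch

TOLERANCE = 1e-3
expected = torch.tensor([1.0e8, 2.5, -0.17])
actual = expected * (1 + 5e-4)  # simulated model output, 0.05% off

ratio = expected / actual
# pass if every element of expected/actual is within [1 - tol, 1 + tol]
ok = torch.all(ratio >= 1 - TOLERANCE) and torch.all(ratio <= 1 + TOLERANCE)
print(bool(ok))  # True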
'''simple docstring'''
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count the paths from the top-left to the bottom-right cell of the grid."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col))
    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)
    visit.remove((row, col))
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 363 |
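A short driver for depth_first_search above: count the paths from the top-left to the bottom-right of a small grid, where 1 marks a blocked cell.

grid = [
    [0, 0, 0],
    [0, 1, 0],
    [0, 0, 0],
]
# Two ways around the central obstacle.
print(depth_first_search(grid, 0, 0, set()))  # 2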
'''simple docstring'''
def nand_gate(input_1: int, input_2: int) -> int:
    """Output 0 only when both inputs are 1."""
    return int((input_1, input_2).count(0) != 0)
def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 67 | 0 |
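NAND is functionally complete, so the gate above can build the others. A small sketch using the nand_gate defined above:

def not_gate(a: int) -> int:
    return nand_gate(a, a)

def and_gate(a: int, b: int) -> int:
    return not_gate(nand_gate(a, b))

def or_gate(a: int, b: int) -> int:
    return nand_gate(not_gate(a), not_gate(b))

assert and_gate(1, 1) == 1 and and_gate(1, 0) == 0
assert or_gate(0, 0) == 0 and or_gate(1, 0) == 1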
import sys
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def str_eval(s: str) -> int:
    """Return the product of the digits in the string s."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product
def solution(n: str = N) -> int:
    """Find the greatest product of thirteen consecutive digits in n."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
if __name__ == "__main__":
print(F"{solution() = }")
| 340 |
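The sliding-window solution above can be cross-checked with a simple brute force over all 13-digit windows, which is useful for validating the faster version:

def brute_force(n: str, window: int = 13) -> int:
    best = 0
    for i in range(len(n) - window + 1):
        product = 1
        for digit in n[i : i + window]:
            product *= int(digit)
        best = max(best, product)
    return best

print(brute_force(N))  # 23514624000, the known answer for this Project Euler input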
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))
    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("module.cls_token", "vit.embeddings.cls_token"),
            ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("module.pos_embed", "vit.embeddings.position_embeddings"),
        ])
    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("module.norm.weight", "layernorm.weight"),
                ("module.norm.bias", "layernorm.bias"),
            ])
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ])
    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    # the projection head is only used during self-supervised pre-training
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1_000
    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    model = ViTMSNModel(config)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]
    image_processor = ViTImageProcessor(size=config.image_size)
    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)
    model.load_state_dict(state_dict)
    model.eval()
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD)
    inputs = image_processor(images=image, return_tensors="pt")
    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state
    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0_915, -1.4_876, -1.1_809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2_889, -18.9_045, 11.7_281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5_028, -22.8_681, 45.6_475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3_868, 5.2_932, -0.4_137]])
    else:
        expected_slice = torch.tensor([[-0.1_792, -0.6_465, 2.4_263]])
    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 340 | 1 |
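The conversion script above splits a fused qkv projection into separate query, key, and value tensors. The slicing logic in isolation, on dummy tensors:

import torch

hidden_size = 8
# timm-style fused projection: rows are stacked as [q; k; v]
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)

q_w = in_proj_weight[:hidden_size, :]
k_w = in_proj_weight[hidden_size : hidden_size * 2, :]
v_w = in_proj_weight[-hidden_size:, :]

# recombining the slices recovers the original fused matrix
assert torch.equal(torch.cat([q_w, k_w, v_w]), in_proj_weight)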
'''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores
    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ])
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))
    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))
    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))
        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)
    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))
        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))
    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)
        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)
        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
| 135 |
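A hedged usage sketch of the criteria tested above inside generation; the model and checkpoint here are placeholders, and generate also accepts max_length directly:

from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import MaxLengthCriteria, StoppingCriteriaList

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

inputs = tokenizer("Hello", return_tensors="pt")
stopping = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])
# stop as soon as the running sequence reaches 20 tokens
out = model.generate(**inputs, stopping_criteria=stopping)
print(tokenizer.decode(out[0]))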
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()
    @property
    def output_shape(self):
        if self.block_type == 'down':
            return (4, 32, 16, 16)
        elif self.block_type == 'mid':
            return (4, 32, 32, 32)
        elif self.block_type == 'up':
            return (4, 32, 64, 64)
        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")
    def get_dummy_input(self, include_temb=True, include_res_hidden_states_tuple=False, include_encoder_hidden_states=False, include_skip_sample=False, ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)
        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {'hidden_states': hidden_states}
        if include_temb:
            temb_channels = 128
            dummy_input['temb'] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)
        if include_res_hidden_states_tuple:
            res_generator = torch.manual_seed(1)
            dummy_input['res_hidden_states_tuple'] = (randn_tensor(shape, generator=res_generator, device=device),)
        if include_encoder_hidden_states:
            dummy_input['encoder_hidden_states'] = floats_tensor((batch_size, 32, 32)).to(torch_device)
        if include_skip_sample:
            dummy_input['skip_sample'] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)
        return dummy_input
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            'in_channels': 32,
            'out_channels': 32,
            'temb_channels': 128,
        }
        if self.block_type == 'up':
            init_dict['prev_output_channel'] = 32
        if self.block_type == 'mid':
            init_dict.pop('out_channels')
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()
        with torch.no_grad():
            output = unet_block(**inputs_dict)
        if isinstance(output, Tuple):
            output = output[0]
        self.assertEqual(output.shape, self.output_shape)
        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)
    @unittest.skipIf(torch_device == 'mps', 'Training is not supported in mps')
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)
        if isinstance(output, Tuple):
            output = output[0]
        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
| 135 | 1 |
"""simple docstring"""
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True
    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')
        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_2 = tokenizer.encode('multi-sequence build', add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
| 72 |
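The test above pins down the single- and pair-sequence layouts DistilBERT shares with BERT; schematically, a single sequence becomes [CLS] A [SEP] and a pair becomes [CLS] A [SEP] B [SEP]. A quick check of the single-sequence case:

from transformers import DistilBertTokenizer

tok = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
ids = tok.build_inputs_with_special_tokens(tok.encode("hi", add_special_tokens=False))
print(ids[0] == tok.cls_token_id, ids[-1] == tok.sep_token_id)  # True True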
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_reformer': ['REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ReformerConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_reformer'] = ['ReformerTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_reformer_fast'] = ['ReformerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_reformer'] = [
'''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ReformerAttention''',
'''ReformerForMaskedLM''',
'''ReformerForQuestionAnswering''',
'''ReformerForSequenceClassification''',
'''ReformerLayer''',
'''ReformerModel''',
'''ReformerModelWithLMHead''',
'''ReformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 72 | 1 |
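The _LazyModule pattern above keeps top-level imports cheap by resolving submodules only on attribute access. The same effect can be sketched with a module-level __getattr__ (PEP 562); module and class names here are illustrative:

# mypackage/__init__.py -- a minimal lazy-import sketch
import importlib

_import_structure = {"heavy_module": ["HeavyClass"]}

def __getattr__(name):
    # Called only when a normal attribute lookup fails, so the heavy
    # submodule is imported the first time HeavyClass is requested.
    for module_name, exported in _import_structure.items():
        if name in exported:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")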
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    def __init__(self, params, data):
        self.params = params
        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])
        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()
    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])
    def __len__(self):
        return len(self.lengths)
    def check(self):
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))
    def remove_long_sequences(self):
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")
        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]
        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token']
        else:
            cls_id, sep_id = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token']
        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)
                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])
        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
_lowerCAmelCase : int = len(self )
_lowerCAmelCase : Union[str, Any] = self.lengths > 11
_lowerCAmelCase : Optional[int] = self.token_ids[indices]
_lowerCAmelCase : int = self.lengths[indices]
_lowerCAmelCase : Optional[Any] = len(self )
logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences." )
def SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
'''simple docstring'''
if "unk_token" not in self.params.special_tok_ids:
return
else:
_lowerCAmelCase : Union[str, Any] = self.params.special_tok_ids["""unk_token"""]
_lowerCAmelCase : Any = len(self )
_lowerCAmelCase : Optional[Any] = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
_lowerCAmelCase : Optional[Any] = (unk_occs / self.lengths) < 0.5
_lowerCAmelCase : Optional[int] = self.token_ids[indices]
_lowerCAmelCase : Any = self.lengths[indices]
_lowerCAmelCase : List[str] = len(self )
logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%)." )
def SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
'''simple docstring'''
if not self.params.is_master:
return
logger.info(f"{len(self )} sequences" )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def SCREAMING_SNAKE_CASE ( self : str , _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = [t[0] for t in batch]
_lowerCAmelCase : Any = [t[1] for t in batch]
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
# Max for paddings
_lowerCAmelCase : Union[str, Any] = max(_UpperCAmelCase )
# Pad token ids
if self.params.mlm:
_lowerCAmelCase : List[str] = self.params.special_tok_ids["""pad_token"""]
else:
_lowerCAmelCase : Tuple = self.params.special_tok_ids["""unk_token"""]
_lowerCAmelCase : Optional[int] = [list(t.astype(_UpperCAmelCase ) ) + [pad_idx] * (max_seq_len_ - len(_UpperCAmelCase )) for t in token_ids]
assert len(tk_ ) == len(_UpperCAmelCase )
assert all(len(_UpperCAmelCase ) == max_seq_len_ for t in tk_ )
_lowerCAmelCase : Any = torch.tensor(tk_ ) # (bs, max_seq_len_)
_lowerCAmelCase : Tuple = torch.tensor(_UpperCAmelCase ) # (bs)
return tk_t, lg_t
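# Hypothetical usage sketch for LmSeqsDataset: `params` is assumed to be any
# object exposing the attributes the class reads (mlm, is_master,
# max_model_input_size, special_tok_ids), and batch_sequences is wired in as
# the DataLoader collate_fn. Equal-length toy rows are used here because a
# ragged corpus would need dtype=object under recent NumPy versions.
from types import SimpleNamespace
from torch.utils.data import DataLoader

params = SimpleNamespace(
    mlm=True,
    is_master=True,
    max_model_input_size=128,
    special_tok_ids={"cls_token": 0, "sep_token": 1, "pad_token": 2, "unk_token": 3},
)
# Toy corpus: each row is [CLS] ... [SEP], already tokenized to ids.
data = [np.array([0] + [5] * 12 + [1]) for _ in range(3)]
dataset = LmSeqsDataset(params=params, data=data)
loader = DataLoader(dataset, batch_size=2, collate_fn=dataset.batch_sequences)
token_ids, lengths = next(iter(loader))  # shapes: (bs, max_seq_len_), (bs,)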
| 159 |
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    """
    Copy/paste/tweak the original model's weights to the transformers design.
    """
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=1_0_2_2_4, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
args = parser.parse_args()
convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
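# The heart of the conversion above is the MAPPING table plus the "*" wildcard,
# which fans a flat fairseq key out to the right transformer layer index. A toy
# version of that renaming step (the key names here are illustrative):
def rename_fairseq_key(fairseq_key):
    for old, new in MAPPING.items():
        if old in fairseq_key:
            # The layer index sits just before the matched suffix, e.g.
            # "encoder.layers.3.self_attn.k_proj" -> "3".
            layer_index = fairseq_key.split(old)[0].split(".")[-2]
            return new.replace("*", layer_index)
    return None

assert rename_fairseq_key("encoder.layers.3.self_attn.k_proj.weight") == "encoder.layers.3.attention.k_proj"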
| 159 | 1 |
"""simple docstring"""
def match_pattern(input_string: str, pattern: str) -> bool:
    """
    Bottom-up dynamic programming solution for matching the input string
    against a regex-like pattern supporting '.' and '*'.
    """
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
lowercase__ = """aab"""
lowercase__ = """c*a*b"""
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F"""{input_string} matches the given pattern {pattern}""")
else:
print(F"""{input_string} does not match with the given pattern {pattern}""")
| 241 |
"""simple docstring"""
import os
def solution():
    """
    Finds the maximum total in a triangle read from triangle.txt, working
    bottom-up row by row.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")

    with open(triangle_path) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
if __name__ == "__main__":
print(solution())
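# A hand-sized version of the same bottom-up accumulation, so the recurrence
# a[i][j] += max(parents) is visible without reading triangle.txt:
a = [[3], [7, 4], [2, 4, 6]]
for i in range(1, len(a)):
    for j in range(len(a[i])):
        number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
        number2 = a[i - 1][j - 1] if j > 0 else 0
        a[i][j] += max(number1, number2)
assert max(a[-1]) == 3 + 7 + 4 == 14  # best path is 3 -> 7 -> 4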
| 241 | 1 |
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_4bit = False
        is_8bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False):
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model
def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
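# The replacement walk above is a recursive traversal of model.named_children()
# that swaps qualifying nn.Linear layers in place. A framework-free sketch of
# that traversal, using a plain half-precision cast as a stand-in for the
# bitsandbytes layers (which need a CUDA runtime):
import torch.nn as nn

def replace_linear_sketch(model, skip=("lm_head",)):
    for name, module in model.named_children():
        if isinstance(module, nn.Linear) and name not in skip:
            new_module = nn.Linear(
                module.in_features, module.out_features, bias=module.bias is not None
            ).half()
            model._modules[name] = new_module
        else:
            replace_linear_sketch(module, skip)
    return model

net = nn.Sequential(nn.Linear(4, 4), nn.Sequential(nn.Linear(4, 2)))
replace_linear_sketch(net)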
| 366 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent
def extract_first_line_failure(failures_short_lines):
    failures = {}
    failure = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            failure = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[failure] = line
            in_error = False

    return failures
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures
        self.thread_ts = None

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
    @property
    def time(self) -> str:
        all_time_spent = [self._time_spent]
        total_secs = 0

        for time_spent in all_time_spent:
            time_parts = time_spent.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
@property
    def header(self) -> dict:
"""simple docstring"""
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures(self) -> dict:
"""simple docstring"""
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
    def failures(self) -> dict:
"""simple docstring"""
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
f" {self.time}."
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
    def category_failures(self) -> dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"The following examples had failures:\n\n\n{report}\n",
},
}
@property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)
@staticmethod
    def error_out():
        payload = [
{
'''type''': '''section''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''There was an issue running the tests.''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True},
'''url''': f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
]
        print("Sending the following payload")
        print(json.dumps({"blocks": payload}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )
    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}
def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts: Dict[str, Artifact] = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure
                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
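# handle_test_results relies on pytest's summary-line format, where each count
# precedes the word it qualifies ("failed", "passed"). A quick check on a
# canned summary string (made up, but shaped like pytest's output):
failed_demo, success_demo, time_demo = handle_test_results("== 2 failed, 130 passed in 124.34s ==")
assert (failed_demo, success_demo, time_demo) == (2, 130, "124.34s")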
| 247 | 0 |
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
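# A quick check of the three rotations on a 2x2 matrix (all counterclockwise,
# matching the print labels above):
m = [[1, 2], [3, 4]]
assert rotate_90(m) == [[2, 4], [1, 3]]
assert rotate_180(m) == [[4, 3], [2, 1]]
assert rotate_270(m) == [[3, 1], [4, 2]]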
| 248 |
MOD_ADLER = 65521


def adler32(plain_text: str) -> int:
    """
    Implements the Adler-32 hash: iterate over the characters, updating the
    two running sums a and b modulo 65521.
    """
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
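# The checksum can be cross-checked against the reference implementation in the
# standard library's zlib module (which operates on bytes rather than str):
import zlib

assert adler32("Wikipedia") == zlib.adler32(b"Wikipedia") == 0x11E60398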
| 248 | 1 |
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    """
    Count the minimum number of perfect squares that sum to the given number,
    using bottom-up dynamic programming.
    """
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
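# By Lagrange's four-square theorem the answer is always at most 4; a few spot
# checks of the DP above:
assert minimum_squares_to_represent_a_number(12) == 3  # 4 + 4 + 4
assert minimum_squares_to_represent_a_number(13) == 2  # 4 + 9
assert minimum_squares_to_represent_a_number(25) == 1  # 25 itself
assert minimum_squares_to_represent_a_number(7) == 4  # 4 + 1 + 1 + 1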
| 362 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 1_008)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_008)
    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
UpperCamelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")
    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 31_227, 4_447, 35]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735]
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
"input_ids": [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]],
"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="facebook/xglm-564M", padding=False,
        )
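# The suite above leans on a tiny SentencePiece fixture instead of the real
# 250k-piece XGLM vocabulary, which keeps the non-@slow tests fast and offline.
# A minimal sketch of the slow-vs-fast parity check it automates; both
# tokenizers are assumed to be built from the same SentencePiece model.
def assert_tokenizer_parity(slow_tok, fast_tok, text):
    assert slow_tok.tokenize(text) == fast_tok.tokenize(text)
    assert slow_tok.encode(text, add_special_tokens=False) == fast_tok.encode(
        text, add_special_tokens=False
    )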
| 60 | 0 |
"""simple docstring"""
def harmonic_series(n_term: str) -> list:
    """Generate the terms of the harmonic series 1, 1/2, 1/3, ... up to the n-th term."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
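# harmonic_series returns the terms as strings; summing the actual series takes
# a numeric pass, e.g. with exact rationals:
from fractions import Fraction

terms = harmonic_series("4")
assert terms == ["1", "1/2", "1/3", "1/4"]
assert sum(Fraction(t) for t in terms) == Fraction(25, 12)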
| 266 |
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith("<PIL.Image.Image image mode=RGB size=100x100 at")


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uinta)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]

    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)

    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
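# The last test hinges on local_binary_value, which thresholds the 8 neighbours
# of a pixel against its centre and packs the comparisons into one byte. A
# self-contained illustration of that thresholding in plain NumPy (the
# clockwise-from-top-left bit ordering here is one common convention, not
# necessarily the one the project module uses):
def lbp_code(patch):
    center = patch[1, 1]
    neighbors = [patch[0, 0], patch[0, 1], patch[0, 2], patch[1, 2],
                 patch[2, 2], patch[2, 1], patch[2, 0], patch[1, 0]]
    return sum(1 << k for k, p in enumerate(neighbors) if p >= center)

demo_patch = np.array([[6, 5, 2], [7, 6, 1], [9, 8, 7]])
assert lbp_code(demo_patch) == 0b11110001  # neighbours >= 6 set their bit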
| 266 | 1 |
'''simple docstring'''
class CircularQueue:
    """Circular FIFO queue with a fixed capacity, backed by a flat list."""

    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")

        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")

        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
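# Wrap-around behaviour of the ring buffer, exercised on a capacity-2 queue:
q = CircularQueue(2)
q.enqueue("a").enqueue("b")  # enqueue returns self, so calls chain
assert len(q) == 2 and q.first() == "a"
assert q.dequeue() == "a"  # front advances modulo n
q.enqueue("c")  # rear wraps around to index 0
assert q.dequeue() == "b" and q.dequeue() == "c"
assert q.is_empty()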
| 334 |
'''simple docstring'''
from collections import deque
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> None:
'''simple docstring'''
A: Union[str, Any] = process_name # process name
A: List[str] = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
A: Dict = arrival_time
A: Optional[Any] = burst_time # remaining burst time
A: Any = 0 # total time of the process wait in ready queue
A: Any = 0 # time from arrival time to completion time
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : list[int] , SCREAMING_SNAKE_CASE_ : deque[Process] , SCREAMING_SNAKE_CASE_ : int , ) -> None:
'''simple docstring'''
A: Dict = number_of_queues
# time slice of queues that round robin algorithm applied
A: int = time_slices
# unfinished process is in this ready_queue
A: Tuple = queue
# current time
A: int = current_time
# finished process is in this sequence queue
A: deque[Process] = deque()
def _snake_case ( self : List[Any] ) -> list[str]:
'''simple docstring'''
A: str = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : list[Process] ) -> list[int]:
'''simple docstring'''
A: Optional[int] = []
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def _snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : list[Process] ) -> list[int]:
'''simple docstring'''
A: Any = []
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def _snake_case ( self : str , SCREAMING_SNAKE_CASE_ : list[Process] ) -> list[int]:
'''simple docstring'''
A: List[Any] = []
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : deque[Process] ) -> list[int]:
'''simple docstring'''
return [q.burst_time for q in queue]
def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : Process ) -> int:
'''simple docstring'''
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def _snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : deque[Process] ) -> deque[Process]:
'''simple docstring'''
A: deque[Process] = deque() # sequence deque of finished process
while len(SCREAMING_SNAKE_CASE_ ) != 0:
A: Optional[Any] = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(SCREAMING_SNAKE_CASE_ )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
A: Any = 0
# set the process's turnaround time because it is finished
A: int = self.current_time - cp.arrival_time
# set the completion time
A: List[str] = self.current_time
# add the process to queue that has finished queue
finished.append(SCREAMING_SNAKE_CASE_ )
self.finish_queue.extend(SCREAMING_SNAKE_CASE_ ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
    def round_robin(
        self, ready_queue: deque[Process], time_slice: int
    ) -> tuple[deque[Process], deque[Process]]:
        finished: deque[Process] = deque()  # sequence deque of finished processes
        # just for one cycle; unfinished processes will go back to the queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if the process's arrival time is later than the current time, update the current time
            if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
            # update waiting time of the unfinished process
            self.update_waiting_time(cp)
            # if the burst time of the process is bigger than the time slice
            if cp.burst_time > time_slice:
                # use the CPU for only one time slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update the end point time
                cp.stop_time = self.current_time
                # put the process at the back of the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use the CPU for the remaining burst time
                self.current_time += cp.burst_time
                # set the burst time to 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process's turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to the finished queue
                finished.append(cp)
        self.finish_queue.extend(finished)  # add finished processes to the finish queue
        # return the finished processes queue and the remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except the last one have the round-robin algorithm applied
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue, self.time_slices[i]
            )
        # the last queue uses the first-come, first-served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
if __name__ == "__main__":
import doctest
UpperCamelCase = Process('''P1''', 0, 53)
UpperCamelCase = Process('''P2''', 0, 17)
UpperCamelCase = Process('''P3''', 0, 68)
UpperCamelCase = Process('''P4''', 0, 24)
UpperCamelCase = 3
UpperCamelCase = [17, 25]
UpperCamelCase = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])})
UpperCamelCase = Process('''P1''', 0, 53)
UpperCamelCase = Process('''P2''', 0, 17)
UpperCamelCase = Process('''P3''', 0, 68)
UpperCamelCase = Process('''P4''', 0, 24)
UpperCamelCase = 3
UpperCamelCase = [17, 25]
UpperCamelCase = deque([Pa, Pa, Pa, Pa])
UpperCamelCase = MLFQ(number_of_queues, time_slices, queue, 0)
UpperCamelCase = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f'waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print completion times of processes(P1, P2, P3, P4)
print(
f'completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f'turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print sequence of finished processes
print(
f'sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'
)
| 334 | 1 |
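A hand-verifiable run is a useful sanity check for the scheduler above; the two-process setup below is illustrative, not from the source:

```python
from collections import deque

# A needs 4 time units, B needs 2; one round-robin level with slice 2, then FCFS.
a = Process("A", 0, 4)
b = Process("B", 0, 2)
mlfq = MLFQ(number_of_queues=2, time_slices=[2], queue=deque([a, b]), current_time=0)
mlfq.multi_level_feedback_queue()

# B finishes inside its first slice (t=4); A is preempted at t=2 and completes at t=6 under FCFS.
assert mlfq.calculate_sequence_of_finish_queue() == ["B", "A"]
assert mlfq.calculate_waiting_time([a, b]) == [2, 2]
assert mlfq.calculate_turnaround_time([a, b]) == [6, 4]
```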
'''simple docstring'''
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
logger = logging.get_logger(__name__)


class ParallelBackendConfig:
    """Holds the currently selected parallel backend name (None = multiprocessing)."""

    backend_name = None
@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Dispatch a map either to a multiprocessing pool or to a joblib backend."""
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )

    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)
def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # we organize the splits ourselves (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")
    return mapped
def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    # joblib is imported lazily so that it stays an optional dependency
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )
@experimental
@contextlib.contextmanager
def parallel_backend(backend_name: str):
    """Context manager that selects a joblib backend for parallel_map."""
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()

    # TODO: call create_cache_and_write_probe if "download" in steps
    # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
| 324 |
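The contiguous split arithmetic above is easy to check in isolation; a self-contained sketch:

```python
def contiguous_splits(items, num_proc):
    """Split items into num_proc contiguous chunks whose sizes differ by at most one."""
    div, mod = divmod(len(items), num_proc)
    splits = []
    for index in range(num_proc):
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        splits.append(items[start:end])
    return splits


# 10 items over 3 processes: the first chunk absorbs the remainder.
assert contiguous_splits(list(range(10)), 3) == [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]
```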
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write a default accelerate config file once for the whole test class.
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer")))
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
""".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer")))
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer")))
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_ner_no_trainer(self):
        # with so little data, distributed training needs more epochs to reach the target score
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertLess(result["train_loss"], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer")))
    @unittest.skip(reason="Fix me @muellerzr")
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"], 28)
        self.assertGreaterEqual(result["eval_exact"], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "qa_no_trainer")))
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer")))
    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_rouge1"], 10)
        self.assertGreaterEqual(result["eval_rouge2"], 2)
        self.assertGreaterEqual(result["eval_rougeL"], 7)
        self.assertGreaterEqual(result["eval_rougeLsum"], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))
    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_bleu"], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer")))
    @slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
| 324 | 1 |
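Every test above reads its metrics from the `all_results.json` file that the example scripts write; a minimal sketch of that contract, with made-up values:

```python
import json
import os
import tempfile

output_dir = tempfile.mkdtemp()
with open(os.path.join(output_dir, "all_results.json"), "w") as f:
    json.dump({"eval_accuracy": 0.82, "train_loss": 0.31}, f)  # illustrative numbers

result = get_results(output_dir)  # the helper defined above
assert result["eval_accuracy"] >= 0.75
```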
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}


class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout


class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of the inputs depends on the task the config is exported for.
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        # A dummy image is used, so OCR must not be applied by the image processor.
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)

        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )

        return inputs
| 76 |
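A quick sanity check of the defaults above (no weights are loaded; the asserted values are the constructor defaults):

```python
config = LayoutLMv3Config()
assert config.model_type == "layoutlmv3"
assert (config.hidden_size, config.num_hidden_layers) == (768, 12)
# The 2-D layout fields sit alongside the usual transformer hyperparameters.
assert (config.max_2d_position_embeddings, config.patch_size) == (1024, 16)
```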
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
| 76 | 1 |
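Usage of the 2x2 branch is easy to verify by hand, since det([[2, 5], [1, 3]]) = 2*3 - 1*5 = 1:

```python
matrix = [[2.0, 5.0], [1.0, 3.0]]
inverse = inverse_of_matrix(matrix)
assert inverse == [[3.0, -5.0], [-1.0, 2.0]]
# Multiplying back gives the identity:
# [[2*3 + 5*(-1), 2*(-5) + 5*2], [1*3 + 3*(-1), 1*(-5) + 3*2]] = [[1, 0], [0, 1]]
```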
from ...processing_utils import ProcessorMixin
class WhisperProcessor(ProcessorMixin):
    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backwards compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text: str, return_tensors="np"):
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
| 329 |
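A short usage sketch; the checkpoint name is a real public one, but the waveform and printed shape are illustrative:

```python
import numpy as np
from transformers import WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
waveform = np.zeros(16_000, dtype=np.float32)  # one second of silence as a stand-in

batch = processor(audio=waveform, sampling_rate=16_000, text="hello world", return_tensors="pt")
print(batch["input_features"].shape)  # log-mel features for the encoder, e.g. (1, 80, 3000)
print(batch["labels"])                # token ids produced by the tokenizer
```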
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)


class SequenceFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)

    def pad(
        self,
        processed_features,
        padding=True,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
    ) -> BatchFeature:
        # If we have a list of dicts, convert it to a dict of lists
        # so that `self.model_input_names` can be used as look-up keys.
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )
            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)

    def _pad(
        self,
        processed_features,
        max_length=None,
        padding_strategy=PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of=None,
        return_attention_mask=None,
    ) -> dict:
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features

    def _truncate(
        self,
        processed_features,
        max_length=None,
        pad_to_multiple_of=None,
        truncation=None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features

    def _get_padding_strategies(self, padding=False, max_length=None):
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
| 205 | 0 |
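The core of `_pad` for a 1-D (`feature_size == 1`) input, reduced to plain numpy with illustrative values:

```python
import numpy as np

seq = np.array([0.1, 0.2, 0.3], dtype=np.float32)
max_length, padding_value = 5, 0.0
attention_mask = np.ones(len(seq), dtype=np.int32)

difference = max_length - len(seq)
attention_mask = np.pad(attention_mask, (0, difference))  # -> [1 1 1 0 0]
padded = np.pad(seq, (0, difference), "constant", constant_values=padding_value)  # -> [0.1 0.2 0.3 0.0 0.0]
```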
"""simple docstring"""
from PIL import Image
def mean_threshold(image: Image.Image) -> Image.Image:
    """Binarize a greyscale image around its mean pixel intensity."""
    width, height = image.size
    mean = 0
    pixels = image.load()

    # First pass: accumulate the mean intensity.
    for x in range(width):
        for y in range(height):
            mean += pixels[x, y]
    mean //= width * height

    # Second pass: threshold each pixel against the mean.
    for x in range(width):
        for y in range(height):
            pixels[x, y] = 255 if pixels[x, y] > mean else 0
    return image


if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
| 68 |
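The same thresholding, vectorized with numpy; note this sketch uses the exact mean rather than the floor-divided integer mean of the loop version above:

```python
import numpy as np
from PIL import Image


def mean_threshold_np(image: Image.Image) -> Image.Image:
    arr = np.asarray(image, dtype=np.int64)
    return Image.fromarray(np.where(arr > arr.mean(), 255, 0).astype(np.uint8))
```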
"""simple docstring"""
from itertools import count
def solution(min_block_length: int = 50) -> int:
    """Return the least fill length n for which the fill-count function first
    exceeds one million (Project Euler 115)."""
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break

    return n


if __name__ == "__main__":
    print(f"{solution() = }")
| 68 | 1 |
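The counting recurrence can be cross-checked against the worked example of Project Euler 114: with minimum block length 3, a row of 7 units admits exactly 17 arrangements. A memoized restatement of the same recurrence:

```python
from functools import lru_cache


@lru_cache(maxsize=None)
def fill_count(n: int, m: int = 3) -> int:
    """Ways to fill a row of n units with blocks of length >= m, blocks separated
    by at least one empty unit; the all-empty row counts as one arrangement."""
    if n < m:
        return 1
    total = 1  # the all-empty row
    for block_length in range(m, n + 1):
        for block_start in range(n - block_length):
            total += fill_count(n - block_start - block_length - 1, m)
        total += 1  # the block flush against the right-hand end
    return total


assert fill_count(7) == 17  # the stated F(7, 3) value in Project Euler 114
```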
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
self.assertIsNotNone(lowerCAmelCase__)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
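A standalone sketch of what the tester above exercises, shrunk to a deliberately tiny configuration (all sizes illustrative):

```python
import torch
from transformers import DebertaConfig, DebertaModel

config = DebertaConfig(
    vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37
)
model = DebertaModel(config).eval()

input_ids = torch.randint(0, 99, (13, 7))  # (batch_size, seq_length)
with torch.no_grad():
    sequence_output = model(input_ids)[0]
assert sequence_output.shape == (13, 7, 32)  # (batch, sequence, hidden)
```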
| 100 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_clap": [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapAudioConfig",
"ClapConfig",
"ClapTextConfig",
],
"processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapModel",
"ClapPreTrainedModel",
"ClapTextModel",
"ClapTextModelWithProjection",
"ClapAudioModel",
"ClapAudioModelWithProjection",
]
__UpperCAmelCase =["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 67 | 0 |
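The `_LazyModule` pattern above can be approximated with module-level `__getattr__` (PEP 562); a minimal sketch with illustrative names:

```python
import importlib

_import_structure = {"configuration_clap": ["ClapConfig"], "processing_clap": ["ClapProcessor"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}


def __getattr__(name):
    # Import the submodule only when one of its attributes is first requested.
    if name in _attr_to_module:
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```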
def kinetic_energy(mass: float, velocity: float) -> float:
    """Kinetic energy E = 1/2 * m * v^2 (the sign of the velocity does not matter)."""
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 358 |
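A quick check of the formula: a 10 kg mass at 5 m/s carries 0.5 * 10 * 5^2 = 125 J, regardless of direction:

```python
assert kinetic_energy(10, 5) == 125.0
assert kinetic_energy(10, -5) == 125.0  # |v| makes the sign irrelevant
```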
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            "height": None,
            "width": None,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        super().test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25e-3)

    def test_stable_diffusion_panorama_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_views_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, view_batch_size=2)

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self , A=0 ) -> Dict:
'''simple docstring'''
lowerCamelCase = torch.manual_seed(A )
lowerCamelCase = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase = """stabilityai/stable-diffusion-2-base"""
lowerCamelCase = DDIMScheduler.from_pretrained(A , subfolder="""scheduler""" )
lowerCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(A , scheduler=A , safety_checker=A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
lowerCamelCase = self.get_inputs()
lowerCamelCase = pipe(**A ).images
lowerCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 20_48, 3)
lowerCamelCase = np.array(
[
0.36968392,
0.27025372,
0.32446766,
0.28379387,
0.36363274,
0.30733347,
0.27100027,
0.27054125,
0.25536096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
def __A ( self ) -> Dict:
'''simple docstring'''
lowerCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-base""" , safety_checker=A )
lowerCamelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
lowerCamelCase = self.get_inputs()
lowerCamelCase = pipe(**A ).images
lowerCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 20_48, 3)
lowerCamelCase = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def __A ( self ) -> int:
'''simple docstring'''
number_of_steps = 0
def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
callback_fn.has_been_called = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
latents = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 2_56)
latents_slice = latents[0, -3:, -3:, -1]
expected_slice = np.array(
[
0.18681869,
0.33907816,
0.5361276,
0.14432865,
-0.02856611,
-0.73941123,
0.23397987,
0.47322682,
-0.37823164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
latents = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 2_56)
latents_slice = latents[0, -3:, -3:, -1]
expected_slice = np.array(
[
0.18539645,
0.33987248,
0.5378559,
0.14437142,
-0.02455261,
-0.7338317,
0.23990755,
0.47356272,
-0.3786505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
callback_fn.has_been_called = False
lowerCamelCase = """stabilityai/stable-diffusion-2-base"""
lowerCamelCase = DDIMScheduler.from_pretrained(A , subfolder="""scheduler""" )
lowerCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(A , scheduler=A , safety_checker=A )
lowerCamelCase = pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
inputs = self.get_inputs()
pipe(**inputs , callback=callback_fn , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def __A ( self ) -> str:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCamelCase = """stabilityai/stable-diffusion-2-base"""
lowerCamelCase = DDIMScheduler.from_pretrained(A , subfolder="""scheduler""" )
lowerCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(A , scheduler=A , safety_checker=A )
lowerCamelCase = pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowerCamelCase = self.get_inputs()
lowerCamelCase = pipe(**A )
lowerCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 5.5 GB is allocated
assert mem_bytes < 5.5 * 10**9
| 66 | 0 |
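# A minimal standalone sketch of the slice-regression pattern every test
# above relies on; the helper name and tolerance default are illustrative
# assumptions, not part of the original suite.
import numpy as np

def check_image_slice(image: np.ndarray, expected_slice: np.ndarray, tol: float = 1e-2) -> None:
    # Compare a 3x3 corner patch of the last channel of the first image in
    # the batch against hard-coded reference values.
    image_slice = image[0, -3:, -3:, -1]
    max_diff = np.abs(image_slice.flatten() - expected_slice).max()
    assert max_diff < tol, f"max diff {max_diff} exceeds tolerance {tol}"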
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class _snake_case ( a__ ):
snake_case__ = "markuplm"
def __init__( self : Union[str, Any] , vocab_size : Tuple=30522 , hidden_size : str=768 , num_hidden_layers : Tuple=12 , num_attention_heads : List[Any]=12 , intermediate_size : Tuple=3072 , hidden_act : str="gelu" , hidden_dropout_prob : List[str]=0.1 , attention_probs_dropout_prob : Tuple=0.1 , max_position_embeddings : Dict=512 , type_vocab_size : str=2 , initializer_range : str=0.0_2 , layer_norm_eps : Any=1E-12 , pad_token_id : List[str]=0 , bos_token_id : Tuple=0 , eos_token_id : Tuple=2 , max_xpath_tag_unit_embeddings : Optional[int]=256 , max_xpath_subs_unit_embeddings : Any=1024 , tag_pad_id : int=216 , subs_pad_id : Any=1001 , xpath_unit_hidden_size : int=32 , max_depth : Dict=50 , position_embedding_type : Union[str, Any]="absolute" , use_cache : str=True , classifier_dropout : Tuple=None , **kwargs : List[str] , ):
super().__init__(
pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.use_cache = use_cache
self.classifier_dropout = classifier_dropout
# additional properties
self.max_depth = max_depth
self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
self.tag_pad_id = tag_pad_id
self.subs_pad_id = subs_pad_id
self.xpath_unit_hidden_size = xpath_unit_hidden_size
| 135 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__A = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 135 | 1 |
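# A hedged sketch of the lazy-import pattern the MRA module above uses:
# replacing the module object with a _LazyModule defers the heavy torch
# imports until an attribute is first accessed. Names are abbreviated.
import sys
from transformers.utils import _LazyModule

_import_structure = {"configuration_mra": ["MraConfig"]}
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
# `from <package> import MraConfig` now triggers the real import on demand.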
"""simple docstring"""
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
SCREAMING_SNAKE_CASE__ = datasets.logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = "\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n"
SCREAMING_SNAKE_CASE__ = "\\nBLEURT is a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project's README at https://github.com/google-research/bleurt#readme for more information.\n"
SCREAMING_SNAKE_CASE__ = "\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n 'scores': List of scores.\nExamples:\n\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> bleurt = datasets.load_metric(\"bleurt\")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [1.03, 1.04]\n"
SCREAMING_SNAKE_CASE__ = {
"bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip",
"bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip",
"bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip",
"bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip",
"bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip",
"bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip",
"BLEURT-20-D3": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip",
"BLEURT-20-D6": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip",
"BLEURT-20-D12": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip",
"BLEURT-20": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip",
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
def snake_case ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/google-research/bleurt' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/google-research/bleurt'] , reference_urls=['https://github.com/google-research/bleurt', 'https://arxiv.org/abs/2004.04696'] , )
def snake_case ( self , lowerCAmelCase ):
"""simple docstring"""
if self.config_name == "default":
logger.warning(
'Using default BLEURT-Base checkpoint for sequence maximum length 128. '
'You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').' )
snake_case = 'bleurt-base-128'
if self.config_name.lower() in CHECKPOINT_URLS:
snake_case = self.config_name.lower()
elif self.config_name.upper() in CHECKPOINT_URLS:
snake_case = self.config_name.upper()
else:
raise KeyError(
F"""{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}""" )
# download the model checkpoint specified by self.config_name and set up the scorer
snake_case = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
snake_case = score.BleurtScorer(os.path.join(lowerCAmelCase , lowerCAmelCase ) )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
snake_case = self.scorer.score(references=lowerCAmelCase , candidates=lowerCAmelCase )
return {"scores": scores}
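# A standalone sketch of the checkpoint-name resolution implemented by the
# metric class above; the URL values are abbreviated placeholders.
CHECKPOINT_URLS = {"bleurt-base-128": "...", "bleurt-large-512": "..."}

def resolve_checkpoint(config_name: str) -> str:
    if config_name == "default":
        config_name = "bleurt-base-128"  # documented fallback checkpoint
    for candidate in (config_name.lower(), config_name.upper()):
        if candidate in CHECKPOINT_URLS:
            return candidate
    raise KeyError(f"{config_name} model not found; choose one of {list(CHECKPOINT_URLS)}")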
| 149 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class lowerCAmelCase_ ( lowerCAmelCase ):
"""simple docstring"""
pass
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self , lowerCAmelCase ):
"""simple docstring"""
snake_case = data
snake_case = None
def __iter__( self ):
"""simple docstring"""
snake_case = self
snake_case = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(lowerCAmelCase )
yield node.data
snake_case = node.next_node
@property
def snake_case ( self ):
"""simple docstring"""
try:
list(self )
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = Node(1)
SCREAMING_SNAKE_CASE__ = Node(2)
SCREAMING_SNAKE_CASE__ = Node(3)
SCREAMING_SNAKE_CASE__ = Node(4)
print(root_node.has_loop) # False
SCREAMING_SNAKE_CASE__ = root_node.next_node
print(root_node.has_loop) # True
SCREAMING_SNAKE_CASE__ = Node(5)
SCREAMING_SNAKE_CASE__ = Node(6)
SCREAMING_SNAKE_CASE__ = Node(5)
SCREAMING_SNAKE_CASE__ = Node(6)
print(root_node.has_loop) # False
SCREAMING_SNAKE_CASE__ = Node(1)
print(root_node.has_loop) # False
| 149 | 1 |
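# `has_loop` above trades O(n) extra memory (the visited list) for
# simplicity. A constant-memory alternative is Floyd's tortoise-and-hare
# algorithm, sketched here against the same Node interface.
def has_loop_floyd(head) -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node            # one step
        fast = fast.next_node.next_node  # two steps
        if slow is fast:                 # the pointers meet only inside a cycle
            return True
    return False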
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE :int = logging.get_logger(__name__)
def get_config( model_name :List[Any] )->List[Any]:
'''simple docstring'''
snake_case_ = "huggingface/label-files"
snake_case_ = "imagenet-1k-id2label.json"
snake_case_ = json.load(open(hf_hub_download(lowerCAmelCase_ , lowerCAmelCase_ , repo_type="dataset" ) , "r" ) )
snake_case_ = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()}
snake_case_ = {v: k for k, v in idalabel.items()}
snake_case_ = "std_conv" if "bit" in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
snake_case_ = BitConfig(
conv_layer=lowerCAmelCase_ , num_labels=1_000 , idalabel=lowerCAmelCase_ , labelaid=lowerCAmelCase_ , )
return config
def rename_key( name :List[Any] )->Tuple:
'''simple docstring'''
if "stem.conv" in name:
snake_case_ = name.replace("stem.conv" , "bit.embedder.convolution" )
if "blocks" in name:
snake_case_ = name.replace("blocks" , "layers" )
if "head.fc" in name:
snake_case_ = name.replace("head.fc" , "classifier.1" )
if name.startswith("norm" ):
snake_case_ = "bit." + name
if "bit" not in name and "classifier" not in name:
snake_case_ = "bit.encoder." + name
return name
def prepare_img( )->str:
'''simple docstring'''
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
im = Image.open(requests.get(url , stream=True ).raw )
return im
@torch.no_grad()
def convert_bit_checkpoint( model_name :Optional[Any] , pytorch_dump_folder_path :List[Any] , push_to_hub :Tuple=False )->List[Any]:
'''simple docstring'''
snake_case_ = get_config(lowerCAmelCase_ )
# load original model from timm
snake_case_ = create_model(lowerCAmelCase_ , pretrained=lowerCAmelCase_ )
timm_model.eval()
# load state_dict of original model
snake_case_ = timm_model.state_dict()
for key in state_dict.copy().keys():
val = state_dict.pop(key )
state_dict[rename_key(key )] = val.squeeze() if "head" in key else val
# load HuggingFace model
snake_case_ = BitForImageClassification(lowerCAmelCase_ )
model.eval()
model.load_state_dict(lowerCAmelCase_ )
# create image processor
snake_case_ = create_transform(**resolve_data_config({} , model=lowerCAmelCase_ ) )
snake_case_ = transform.transforms
snake_case_ = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
snake_case_ = BitImageProcessor(
do_resize=lowerCAmelCase_ , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=lowerCAmelCase_ , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=lowerCAmelCase_ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
snake_case_ = prepare_img()
snake_case_ = transform(lowerCAmelCase_ ).unsqueeze(0 )
snake_case_ = processor(lowerCAmelCase_ , return_tensors="pt" ).pixel_values
# verify pixel values
assert torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ )
# verify logits
with torch.no_grad():
snake_case_ = model(lowerCAmelCase_ )
snake_case_ = outputs.logits
print("Logits:" , logits[0, :3] )
print("Predicted class:" , model.config.idalabel[logits.argmax(-1 ).item()] )
snake_case_ = timm_model(lowerCAmelCase_ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowerCAmelCase_ , outputs.logits , atol=1e-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
print(F'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCAmelCase_ )
processor.save_pretrained(lowerCAmelCase_ )
if push_to_hub:
print(F'''Pushing model {model_name} and processor to the hub''' )
model.push_to_hub(F'''ybelkada/{model_name}''' )
processor.push_to_hub(F'''ybelkada/{model_name}''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE :Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''resnetv2_50x1_bitm''',
type=str,
help='''Name of the BiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model to the hub.''',
)
SCREAMING_SNAKE_CASE :Optional[int] = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 159 |
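# The heart of the conversion above is a mechanical key rename over the timm
# state dict. A minimal standalone sketch (the helper name is an assumption):
def rename_state_dict(state_dict: dict, rename_fn) -> dict:
    # Apply the name-mapping function to every key; tensor values are untouched.
    return {rename_fn(key): value for key, value in state_dict.items()}
# e.g. rename_state_dict(timm_model.state_dict(), rename_key), with rename_key
# being the substitution helper defined in the script above.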
from __future__ import annotations
from collections import namedtuple
def _lowerCAmelCase ( voltage :float , current :float , power :float )->tuple:
'''simple docstring'''
result = namedtuple("result" , "name value" )
if (voltage, current, power).count(0 ) != 1:
raise ValueError("Only one argument must be 0" )
elif power < 0:
raise ValueError(
"Power cannot be negative in any electrical/electronics system" )
elif voltage == 0:
return result("voltage" , power / current )
elif current == 0:
return result("current" , power / voltage )
elif power == 0:
return result("power" , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 159 | 1 |
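# Example calls for the solver above: exactly one of voltage, current, power
# must be zero, and that quantity is computed from the other two.
# _lowerCAmelCase(voltage=0, current=2, power=5)  -> result(name='voltage', value=2.5)
# _lowerCAmelCase(voltage=2, current=2, power=0)  -> result(name='power', value=4.0)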
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self , parent , out_indices=None , stage_names=None , out_features=None , backbone="resnet50" , batch_size=3 , image_size=32 , num_channels=3 , use_pretrained_backbone=True , is_training=True , ) -> str:
'''simple docstring'''
self.parent = parent
self.out_indices = out_indices if out_indices is not None else [4]
self.stage_names = stage_names
self.out_features = out_features
self.backbone = backbone
self.batch_size = batch_size
self.image_size = image_size
self.num_channels = num_channels
self.use_pretrained_backbone = use_pretrained_backbone
self.is_training = is_training
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCamelCase = self.get_config()
return config, pixel_values
def lowercase_ ( self ) -> Any:
'''simple docstring'''
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def lowercase_ ( self , config , pixel_values ) -> Any:
'''simple docstring'''
model = TimmBackbone(config=config )
model.to(torch_device )
model.eval()
with torch.no_grad():
result = model(pixel_values )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase = config_and_inputs
__lowerCamelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class __lowerCAmelCase ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (TimmBackbone,) if is_torch_available() else ()
snake_case_ = {'''feature-extraction''': TimmBackbone} if is_torch_available() else {}
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = TimmBackboneModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = 'resnet18'
__lowerCamelCase = 'microsoft/resnet-18'
__lowerCamelCase = AutoBackbone.from_pretrained(lowerCamelCase__ , use_timm_backbone=lowerCamelCase__ )
__lowerCamelCase = AutoBackbone.from_pretrained(lowerCamelCase__ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
__lowerCamelCase = AutoBackbone.from_pretrained(lowerCamelCase__ , use_timm_backbone=lowerCamelCase__ , out_indices=[1, 2, 3] )
__lowerCamelCase = AutoBackbone.from_pretrained(lowerCamelCase__ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
def lowercase_ ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side' )
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def lowercase_ ( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def lowercase_ ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip('Safetensors is not supported by timm.' )
def lowercase_ ( self ) -> str:
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowercase_ ( self ) -> int:
'''simple docstring'''
pass
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(lowerCamelCase__ )
__lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase = [*signature.parameters.keys()]
__lowerCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = True
__lowerCamelCase = self.has_attentions
# no need to test all models as different heads yield the same functionality
__lowerCamelCase = self.all_model_classes[0]
__lowerCamelCase = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
__lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = model(**lowerCamelCase__ )
__lowerCamelCase = outputs[0][-1]
# Encoder-/Decoder-only models
__lowerCamelCase = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
__lowerCamelCase = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=lowerCamelCase__ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(**lowerCamelCase__ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
__lowerCamelCase = copy.deepcopy(lowerCamelCase__ )
__lowerCamelCase = None
__lowerCamelCase = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(**lowerCamelCase__ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
__lowerCamelCase = copy.deepcopy(lowerCamelCase__ )
__lowerCamelCase = False
__lowerCamelCase = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(**lowerCamelCase__ )
| 365 |
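# A hedged usage sketch of the behaviour these tests pin down: loading a timm
# checkpoint through the AutoBackbone API with explicit output stages.
# Running it requires timm plus network access to download the weights.
from transformers import AutoBackbone

backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=[1, 2, 3])
# backbone(pixel_values).feature_maps yields one tensor per requested stage.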
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
model = AutoModelForSeqaSeqLM.from_pretrained('google/mt5-small' , return_dict=True ).to(torch_device )
tokenizer = AutoTokenizer.from_pretrained('google/mt5-small' )
input_ids = tokenizer('Hello there' , return_tensors='pt' ).input_ids
labels = tokenizer('Hi I am' , return_tensors='pt' ).input_ids
loss = model(input_ids.to(torch_device ) , labels=labels.to(torch_device ) ).loss
mtf_score = -(labels.shape[-1] * loss.item())
EXPECTED_SCORE = -84.91_27
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 348 | 0 |
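# Why the test above multiplies the loss by the label length: the model
# returns cross-entropy averaged over target tokens, so the sequence
# log-likelihood is recovered as below (a sketch, assuming mean reduction).
def sequence_log_likelihood(mean_loss: float, num_target_tokens: int) -> float:
    return -(num_target_tokens * mean_loss)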
def lowerCamelCase__ ( equation1 , equation2):
# Check if the input is valid
if not len(equation1) == len(equation2) == 3:
raise ValueError("Please enter a valid equation.")
if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
raise ValueError("Both a & b of two equations can't be zero.")
# Extract the coefficients
a1 , b1 , c1 = equation1
a2 , b2 , c2 = equation2
# Calculate the determinants of the matrices
determinant = a1 * b2 - a2 * b1
determinant_x = c1 * b2 - c2 * b1
determinant_y = a1 * c2 - a2 * c1
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError("Infinite solutions. (Consistent system)")
else:
raise ValueError("No solution. (Inconsistent system)")
else:
if determinant_x == determinant_y == 0:
# Trivial solution (Inconsistent system)
return (0.0, 0.0)
else:
x = determinant_x / determinant
y = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
| 76 |
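# Worked example for the solver above, with each equation given as the
# coefficients (a, b, c) of a*x + b*y = c:
#   2x + 3y = 7 and 4x + 5y = 13 have the unique solution x=2, y=1, so
#   lowerCamelCase__((2, 3, 7), (4, 5, 13)) returns (2.0, 1.0).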
"""simple docstring"""
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
SCREAMING_SNAKE_CASE = True
from torch.cuda.amp import autocast
SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase_ :
lowercase__ = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
lowercase__ = field(
default=A_, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''}, )
lowercase__ = field(
default=A_, metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
lowercase__ = field(
default=A_, metadata={'''help''': '''Whether to log verbose messages or not.'''}, )
lowercase__ = field(
default=2.0, metadata={'''help''': '''Maximum temperature for gumbel softmax.'''} )
lowercase__ = field(
default=0.5, metadata={'''help''': '''Minimum temperature for gumbel softmax.'''} )
lowercase__ = field(
default=0.99_99_95, metadata={'''help''': '''Decay of gumbel temperature during training.'''} )
def _SCREAMING_SNAKE_CASE ( model_args , training_args ) -> int:
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
A__ = logging.WARNING
if model_args.verbose_logging:
A__ = logging.DEBUG
elif trainer_utils.is_main_process(training_args.local_rank ):
A__ = logging.INFO
logger.setLevel(lowercase_ )
@dataclass
class UpperCAmelCase_ :
lowercase__ = field(
default=A_, metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
lowercase__ = field(
default=A_, metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
lowercase__ = field(
default='''train''', metadata={
'''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
}, )
lowercase__ = field(
default='''validation''', metadata={
'''help''': (
'''The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''''
)
}, )
lowercase__ = field(
default='''file''', metadata={'''help''': '''Column in the dataset that contains speech file path. Defaults to \'file\''''}, )
lowercase__ = field(
default=A_, metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
lowercase__ = field(
default=1, metadata={
'''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
}, )
lowercase__ = field(
default=A_, metadata={'''help''': '''The number of processes to use for the preprocessing.'''}, )
lowercase__ = field(
default=20.0, metadata={'''help''': '''Filter audio files that are longer than `max_duration_in_seconds` seconds'''} )
@dataclass
class UpperCAmelCase_ :
lowercase__ = 42
lowercase__ = 42
lowercase__ = "longest"
lowercase__ = None
lowercase__ = None
def __call__( self : Tuple , snake_case_ : List[Dict[str, Union[List[int], torch.Tensor]]] ) -> Dict[str, torch.Tensor]:
'''simple docstring'''
A__ = self.feature_extractor.pad(
snake_case_ , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
A__ = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1] )
A__ = batch["input_values"].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
A__ = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1 ) ).to(
torch.long )
A__ = torch.zeros(
(batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch["input_values"].device )
# these two operations makes sure that all values
# before the output lengths indices are attended to
A__ = 1
A__ = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
# sample randomly masked indices
A__ = _compute_mask_indices(
(batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=snake_case_ , min_masks=2 , )
return batch
class UpperCAmelCase_ ( A_ ):
def __init__( self : Any , *args : Dict , max_gumbel_temp : Optional[int]=1 , min_gumbel_temp : str=0 , gumbel_temp_decay : str=1.0 , **kwargs : List[str] ) -> List[str]:
'''simple docstring'''
super().__init__(*args , **kwargs )
self.num_update_step = 0
self.max_gumbel_temp = max_gumbel_temp
self.min_gumbel_temp = min_gumbel_temp
self.gumbel_temp_decay = gumbel_temp_decay
def __magic_name__ ( self : Tuple , model : nn.Module , inputs : Dict[str, Union[torch.Tensor, Any]] ) -> torch.Tensor:
'''simple docstring'''
model.train()
inputs = self._prepare_inputs(inputs )
if self.use_amp:
with autocast():
loss = self.compute_loss(model , inputs )
else:
loss = self.compute_loss(model , inputs )
if self.args.n_gpu > 1 or self.deepspeed:
if model.module.config.ctc_loss_reduction == "mean":
loss = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
loss = loss.sum() / (inputs["mask_time_indices"]).sum()
else:
raise ValueError(F"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" )
if self.args.gradient_accumulation_steps > 1:
loss = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(loss ).backward()
elif self.use_apex:
with amp.scale_loss(loss , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(loss )
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
return loss.detach()
def _SCREAMING_SNAKE_CASE ( ) -> List[str]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
A__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
A__, A__, A__ = parser.parse_args_into_dataclasses()
configure_logger(lowercase_ , lowercase_ )
# Downloading and loading a dataset from the hub.
A__ = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
if "validation" not in datasets.keys():
# make sure only "validation" and "train" keys remain"
A__ = DatasetDict()
A__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f"""{data_args.train_split_name}[:{data_args.validation_split_percentage}%]""" , cache_dir=model_args.cache_dir , )
A__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f"""{data_args.train_split_name}[{data_args.validation_split_percentage}%:]""" , cache_dir=model_args.cache_dir , )
else:
# make sure only "validation" and "train" keys remain"
A__ = DatasetDict()
A__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split="validation" , cache_dir=model_args.cache_dir , )
A__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f"""{data_args.train_split_name}""" , cache_dir=model_args.cache_dir , )
# only normalized-inputs-training is supported
A__ = WavaVecaFeatureExtractor.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=lowercase_ )
def prepare_dataset(batch ):
# check that all files have the correct sampling rate
A__, A__ = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
return batch
# load audio files into numpy arrays
A__ = datasets.map(
lowercase_ , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["train"].column_names )
# filter audio files that are too long
A__ = vectorized_datasets.filter(
lambda data : len(data["speech"] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
def normalize(batch ):
return feature_extractor(batch["speech"] , sampling_rate=feature_extractor.sampling_rate )
# normalize and transform to `BatchFeatures`
A__ = vectorized_datasets.map(
lowercase_ , batched=lowercase_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["train"].column_names , )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
A__ = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
"PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
" ``config.feat_extract_norm='layer'" )
A__ = WavaVecaForPreTraining(lowercase_ )
A__ = DataCollatorForWavaVecaPretraining(model=lowercase_ , feature_extractor=lowercase_ )
A__ = WavaVecaPreTrainer(
model=lowercase_ , data_collator=lowercase_ , args=lowercase_ , train_dataset=vectorized_datasets["train"] , eval_dataset=vectorized_datasets["validation"] , tokenizer=lowercase_ , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main()
| 247 | 0 |
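# A standalone sketch of the gumbel-temperature schedule the trainer above
# applies after every update step: exponential decay with a floor. Defaults
# mirror the ModelArguments defaults.
def gumbel_temperature(num_update_step: int, max_temp: float = 2.0, min_temp: float = 0.5, decay: float = 0.999995) -> float:
    return max(max_temp * decay**num_update_step, min_temp)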
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
snake_case_ : int = logging.get_logger()
def convert_weight_and_push( hidden_sizes , name , config , save_directory , push_to_hub = True ):
print(f'Converting {name}...' )
with torch.no_grad():
if hidden_sizes == 1_2_8:
if name[-1] == "S":
_UpperCamelCase : Optional[Any] = timm.create_model('levit_128s' , pretrained=UpperCAmelCase_ )
else:
_UpperCamelCase : Any = timm.create_model('levit_128' , pretrained=UpperCAmelCase_ )
if hidden_sizes == 1_9_2:
_UpperCamelCase : Any = timm.create_model('levit_192' , pretrained=UpperCAmelCase_ )
if hidden_sizes == 2_5_6:
_UpperCamelCase : Tuple = timm.create_model('levit_256' , pretrained=UpperCAmelCase_ )
if hidden_sizes == 3_8_4:
_UpperCamelCase : int = timm.create_model('levit_384' , pretrained=UpperCAmelCase_ )
from_model.eval()
_UpperCamelCase : Dict = LevitForImageClassificationWithTeacher(UpperCAmelCase_ ).eval()
_UpperCamelCase : Optional[Any] = OrderedDict()
_UpperCamelCase : int = from_model.state_dict()
_UpperCamelCase : Any = list(from_model.state_dict().keys() )
_UpperCamelCase : Tuple = list(our_model.state_dict().keys() )
print(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) )
for i in range(len(UpperCAmelCase_ ) ):
_UpperCamelCase : Dict = weights[og_keys[i]]
our_model.load_state_dict(UpperCAmelCase_ )
_UpperCamelCase : List[str] = torch.randn((2, 3, 2_2_4, 2_2_4) )
_UpperCamelCase : List[str] = from_model(UpperCAmelCase_ )
_UpperCamelCase : List[Any] = our_model(UpperCAmelCase_ ).logits
assert torch.allclose(UpperCAmelCase_ , UpperCAmelCase_ ), "The model logits don't match the original one."
_UpperCamelCase : Any = name
print(UpperCAmelCase_ )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
_UpperCamelCase : Union[str, Any] = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(f'Pushed {checkpoint_name}' )
def convert_weights_and_push( save_directory , model_name = None , push_to_hub = True ):
_UpperCamelCase : int = 'imagenet-1k-id2label.json'
_UpperCamelCase : int = 1_0_0_0
_UpperCamelCase : Dict = (1, num_labels)
_UpperCamelCase : Dict = 'huggingface/label-files'
_UpperCamelCase : Optional[Any] = num_labels
_UpperCamelCase : Any = json.load(open(hf_hub_download(UpperCAmelCase_ , UpperCAmelCase_ , repo_type='dataset' ) , 'r' ) )
_UpperCamelCase : Any = {int(UpperCAmelCase_ ): v for k, v in idalabel.items()}
_UpperCamelCase : List[str] = idalabel
_UpperCamelCase : Dict = {v: k for k, v in idalabel.items()}
_UpperCamelCase : str = partial(UpperCAmelCase_ , num_labels=UpperCAmelCase_ , idalabel=UpperCAmelCase_ , labelaid=UpperCAmelCase_ )
_UpperCamelCase : Tuple = {
'levit-128S': 1_2_8,
'levit-128': 1_2_8,
'levit-192': 1_9_2,
'levit-256': 2_5_6,
'levit-384': 3_8_4,
}
_UpperCamelCase : Optional[Any] = {
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[1_2_8, 2_5_6, 3_8_4] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[1_6, 1_6, 1_6] , drop_path_rate=0 , ),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[1_2_8, 2_5_6, 3_8_4] , num_attention_heads=[4, 8, 1_2] , depths=[4, 4, 4] , key_dim=[1_6, 1_6, 1_6] , drop_path_rate=0 , ),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[1_9_2, 2_8_8, 3_8_4] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[3_2, 3_2, 3_2] , drop_path_rate=0 , ),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[2_5_6, 3_8_4, 5_1_2] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[3_2, 3_2, 3_2] , drop_path_rate=0 , ),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[3_8_4, 5_1_2, 7_6_8] , num_attention_heads=[6, 9, 1_2] , depths=[4, 4, 4] , key_dim=[3_2, 3_2, 3_2] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , UpperCAmelCase_ , names_to_config[model_name] , UpperCAmelCase_ , UpperCAmelCase_ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
return config, expected_shape
if __name__ == "__main__":
snake_case_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
snake_case_ : Dict = parser.parse_args()
snake_case_ : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 236 |
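# The essential parity check performed for every LeViT variant above, as a
# standalone sketch: identical random inputs must produce matching logits
# from the timm reference model and the converted transformers model.
import torch

def check_logit_parity(from_model, our_model) -> None:
    x = torch.randn(2, 3, 224, 224)
    with torch.no_grad():
        assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."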
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
snake_case_ : Optional[Any] = {
'configuration_clip': [
'CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPConfig',
'CLIPOnnxConfig',
'CLIPTextConfig',
'CLIPVisionConfig',
],
'processing_clip': ['CLIPProcessor'],
'tokenization_clip': ['CLIPTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Optional[int] = ['CLIPTokenizerFast']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Optional[int] = ['CLIPFeatureExtractor']
snake_case_ : Dict = ['CLIPImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Tuple = [
'CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPModel',
'CLIPPreTrainedModel',
'CLIPTextModel',
'CLIPTextModelWithProjection',
'CLIPVisionModel',
'CLIPVisionModelWithProjection',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : List[Any] = [
'TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCLIPModel',
'TFCLIPPreTrainedModel',
'TFCLIPTextModel',
'TFCLIPVisionModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Optional[int] = [
'FlaxCLIPModel',
'FlaxCLIPPreTrainedModel',
'FlaxCLIPTextModel',
'FlaxCLIPTextPreTrainedModel',
'FlaxCLIPVisionModel',
'FlaxCLIPVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
snake_case_ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 236 | 1 |
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
lowercase : Optional[int] = get_tests_dir("""fixtures/dummy-config.json""")
class A__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self) -> Dict:
'''simple docstring'''
a__ : Tuple = 0
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
self.assertIsNotNone(transformers.models.auto.__spec__)
self.assertIsNotNone(importlib.util.find_spec('transformers.models.auto'))
def __lowercase ( self) -> Optional[Any]:
'''simple docstring'''
a__ : List[Any] = AutoConfig.from_pretrained('bert-base-uncased')
self.assertIsInstance(lowercase , lowercase)
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
a__ : Optional[Any] = AutoConfig.from_pretrained(lowercase)
self.assertIsInstance(lowercase , lowercase)
def __lowercase ( self) -> Optional[int]:
'''simple docstring'''
a__ : Any = AutoConfig.from_pretrained(lowercase)
self.assertIsInstance(lowercase , lowercase)
def __lowercase ( self) -> Optional[Any]:
'''simple docstring'''
a__ : Tuple = AutoConfig.for_model('roberta')
self.assertIsInstance(lowercase , lowercase)
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
a__ : List[str] = os.path.join(lowercase , 'fake-roberta')
os.makedirs(lowercase , exist_ok=lowercase)
with open(os.path.join(lowercase , 'config.json') , 'w') as f:
f.write(json.dumps({}))
a__ : List[str] = AutoConfig.from_pretrained(lowercase)
self.assertEqual(type(lowercase) , lowercase)
def __lowercase ( self) -> Optional[int]:
'''simple docstring'''
try:
AutoConfig.register('custom' , lowercase)
# Wrong model type will raise an error
with self.assertRaises(lowercase):
AutoConfig.register('model' , lowercase)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase):
AutoConfig.register('bert' , lowercase)
# Now that the config is registered, it can be used as any other config with the auto-API
a__ : int = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowercase)
a__ : Optional[int] = AutoConfig.from_pretrained(lowercase)
self.assertIsInstance(lowercase , lowercase)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
with self.assertRaisesRegex(
lowercase , 'bert-base is not a local folder and is not a valid model identifier'):
a__ : int = AutoConfig.from_pretrained('bert-base')
def __lowercase ( self) -> int:
'''simple docstring'''
with self.assertRaisesRegex(
lowercase , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'):
a__ : Union[str, Any] = AutoConfig.from_pretrained(lowercase , revision='aaaaaa')
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
with self.assertRaisesRegex(
lowercase , 'hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.' , ):
a__ : str = AutoConfig.from_pretrained('hf-internal-testing/no-config-test-repo')
def __lowercase ( self) -> List[str]:
'''simple docstring'''
with self.assertRaises(lowercase):
a__ : Optional[int] = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model')
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase):
a__ : Union[str, Any] = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=lowercase)
a__ : Tuple = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=lowercase)
self.assertEqual(config.__class__.__name__ , 'NewModelConfig')
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowercase)
a__ : Optional[int] = AutoConfig.from_pretrained(lowercase , trust_remote_code=lowercase)
self.assertEqual(reloaded_config.__class__.__name__ , 'NewModelConfig')
def __lowercase ( self) -> str:
'''simple docstring'''
class A__ ( __UpperCAmelCase ):
"""simple docstring"""
__A : Tuple = '''new-model'''
try:
AutoConfig.register('new-model' , lowercase)
# If remote code is not set, the default is to use local
a__ : Any = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model')
self.assertEqual(config.__class__.__name__ , 'NewModelConfigLocal')
# If remote code is disabled, we load the local one.
a__ : Optional[Any] = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=lowercase)
self.assertEqual(config.__class__.__name__ , 'NewModelConfigLocal')
# If remote is enabled, we load from the Hub
a__ : int = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=lowercase)
self.assertEqual(config.__class__.__name__ , 'NewModelConfig')
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 99 |
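# A hedged sketch of the registration flow exercised above: a config class
# registered under a new model type becomes visible to the auto-API.
# `MyConfig` and "my-model" are illustrative names.
from transformers import AutoConfig, PretrainedConfig

class MyConfig(PretrainedConfig):
    model_type = "my-model"

AutoConfig.register("my-model", MyConfig)
config = AutoConfig.for_model("my-model")  # now returns a MyConfig instance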
"""simple docstring"""
import numpy as np
def _snake_case ( vector : np.array ):
return 1 / (1 + np.exp(-vector ))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 60 | 0 |
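# Example values for the sigmoid above (approximate):
# _snake_case(np.array([-1.0, 0.0, 1.0])) -> array([0.26894142, 0.5, 0.73105858])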
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16  # constant names reconstructed; the originals were obfuscated
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name_or_path: str = "bert-base-cased"):
    """Create train/validation `DataLoader`s for the GLUE MRPC dataset."""
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(predictions=predictions, references=references)

    eval_metric = metric.compute()
    return eval_metric["accuracy"]


def training_function(config, args):
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch - 1}.json"), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        output_dir = f"epoch_{epoch}"
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
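
# Launch sketch (added; paths and process counts are illustrative only):
#   accelerate launch --num_processes 2 this_script.py --output_dir /tmp/ckpt --num_epochs 2
#   accelerate launch --num_processes 2 this_script.py --output_dir /tmp/ckpt \
#       --resume_from_checkpoint /tmp/ckpt/epoch_1
# The resumed run replays `evaluation_loop` and asserts the metrics stored in
# state_1.json match, which is how checkpoint integrity is verified above.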
| 351 |
def solution(limit: int = 1000000) -> int:
    """Sum Euler's totient phi(n) for 2 <= n <= limit using a prime sieve."""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"""{solution() = }""")
| 94 | 0 |
class CircularQueue:
    """Circular FIFO queue backed by a fixed-size array of n slots."""

    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")

        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")

        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
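
if __name__ == "__main__":
    # Minimal usage demo (added; not part of the original implementation):
    queue = CircularQueue(3)
    queue.enqueue("a").enqueue("b")
    assert len(queue) == 2 and queue.first() == "a"
    assert queue.dequeue() == "a" and queue.dequeue() == "b"
    assert queue.is_empty()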
| 334 |
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
__UpperCAmelCase = None
__UpperCAmelCase = None
@property
def _lowerCAmelCase ( self : List[Any] ):
return self.feat_extract_tester.prepare_feat_extract_dict()
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(snake_case ,'feature_size' ) )
self.assertTrue(hasattr(snake_case ,'sampling_rate' ) )
self.assertTrue(hasattr(snake_case ,'padding_value' ) )
def _lowerCAmelCase ( self : Any ):
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(snake_case ) == len(snake_case ) for x, y in zip(snake_case ,processed_features[input_name] ) ) )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common(equal_length=snake_case )
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} ,tensor_type='np' )
SCREAMING_SNAKE_CASE =processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE =batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def _lowerCAmelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common(equal_length=snake_case )
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} ,tensor_type='pt' )
SCREAMING_SNAKE_CASE =processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE =batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def _lowerCAmelCase ( self : str ):
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common(equal_length=snake_case )
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} ,tensor_type='tf' )
SCREAMING_SNAKE_CASE =processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE =batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def _lowerCAmelCase ( self : List[Any] ,snake_case : Optional[Any]=False ):
def _inputs_have_equal_length(snake_case : Dict ):
SCREAMING_SNAKE_CASE =len(input[0] )
for input_slice in input[1:]:
if len(snake_case ) != length:
return False
return True
def _inputs_are_equal(snake_case : str ,snake_case : Dict ):
if len(snake_case ) != len(snake_case ):
return False
for input_slice_a, input_slice_a in zip(snake_case ,snake_case ):
if not np.allclose(np.asarray(snake_case ) ,np.asarray(snake_case ) ,atol=1e-3 ):
return False
return True
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common(numpify=snake_case )
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.seq_length_diff
SCREAMING_SNAKE_CASE =self.feat_extract_tester.max_seq_length + pad_diff
SCREAMING_SNAKE_CASE =self.feat_extract_tester.min_seq_length
SCREAMING_SNAKE_CASE =self.feat_extract_tester.batch_size
SCREAMING_SNAKE_CASE =self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding=snake_case )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='max_length' ,max_length=len(speech_inputs[-1] ) )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='np' )
SCREAMING_SNAKE_CASE =input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(snake_case ):
feat_extract.pad(snake_case ,padding='max_length' )[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=snake_case ,return_tensors='np' )
SCREAMING_SNAKE_CASE =input_a[input_name]
self.assertFalse(_inputs_have_equal_length(snake_case ) )
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertTrue(_inputs_are_equal(snake_case ,snake_case ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,pad_to_multiple_of=10 )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,pad_to_multiple_of=10 )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,pad_to_multiple_of=10 ,max_length=snake_case )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,pad_to_multiple_of=10 ,max_length=snake_case ,return_tensors='np' ,)
SCREAMING_SNAKE_CASE =input_a[input_name]
self.assertTrue(all(len(snake_case ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(snake_case ,snake_case ) )
SCREAMING_SNAKE_CASE =pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(snake_case ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] ,(batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
SCREAMING_SNAKE_CASE =(np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1e-3 )
def _lowerCAmelCase ( self : Union[str, Any] ,snake_case : Optional[int]=False ):
def _inputs_have_equal_length(snake_case : str ):
SCREAMING_SNAKE_CASE =len(input[0] )
for input_slice in input[1:]:
if len(snake_case ) != length:
return False
return True
def _inputs_are_equal(snake_case : Tuple ,snake_case : Optional[Any] ):
if len(snake_case ) != len(snake_case ):
return False
for input_slice_a, input_slice_a in zip(snake_case ,snake_case ):
if not np.allclose(np.asarray(snake_case ) ,np.asarray(snake_case ) ,atol=1e-3 ):
return False
return True
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common(numpify=snake_case )
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
# truncate to smallest
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,truncation=snake_case )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) )
SCREAMING_SNAKE_CASE =input_a[input_name]
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertFalse(_inputs_have_equal_length(snake_case ) )
# truncate to smallest with np
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,return_tensors='np' ,truncation=snake_case ,)
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,return_tensors='np' )
SCREAMING_SNAKE_CASE =input_a[input_name]
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(snake_case ) )
# truncate to middle
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,truncation=snake_case ,return_tensors='np' ,)
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,truncation=snake_case )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,return_tensors='np' )
SCREAMING_SNAKE_CASE =input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertTrue(_inputs_are_equal(snake_case ,snake_case ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(snake_case ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(snake_case ):
feat_extract.pad(snake_case ,truncation=snake_case )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(snake_case ):
feat_extract.pad(snake_case ,padding='longest' ,truncation=snake_case )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(snake_case ):
feat_extract.pad(snake_case ,padding='longest' ,truncation=snake_case )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(snake_case ):
feat_extract.pad(snake_case ,padding='max_length' ,truncation=snake_case )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
SCREAMING_SNAKE_CASE =12
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,pad_to_multiple_of=snake_case ,truncation=snake_case ,)
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,pad_to_multiple_of=snake_case ,)
SCREAMING_SNAKE_CASE =input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
SCREAMING_SNAKE_CASE =len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
SCREAMING_SNAKE_CASE =((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertFalse(_inputs_have_equal_length(snake_case ) )
def _lowerCAmelCase ( self : Optional[int] ):
self._check_padding(numpify=snake_case )
def _lowerCAmelCase ( self : Tuple ):
self._check_padding(numpify=snake_case )
def _lowerCAmelCase ( self : List[str] ):
self._check_truncation(numpify=snake_case )
def _lowerCAmelCase ( self : int ):
self._check_truncation(numpify=snake_case )
@require_torch
def _lowerCAmelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='np' )[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='pt' )[input_name]
        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)
@require_tf
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='np' )[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='tf' )[input_name]
        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().astype(np.float32).sum()) < 1e-2)
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE =self.feat_extract_dict
SCREAMING_SNAKE_CASE =True
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**snake_case )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE =[len(snake_case ) for x in speech_inputs]
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='np' )
self.assertIn('attention_mask' ,snake_case )
self.assertListEqual(list(processed.attention_mask.shape ) ,list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() ,snake_case )
def _lowerCAmelCase ( self : Dict ):
SCREAMING_SNAKE_CASE =self.feat_extract_dict
SCREAMING_SNAKE_CASE =True
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**snake_case )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE =[len(snake_case ) for x in speech_inputs]
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE =min(snake_case )
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=snake_case ,truncation=snake_case ,return_tensors='np' )
self.assertIn('attention_mask' ,snake_case )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) ,[processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() ,[max_length for x in speech_inputs] )
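
    # Editorial note (added): in the padding checks above, padding="longest"
    # pads to the longest sample in the batch, padding="max_length" pads to the
    # explicit max_length, and pad_to_multiple_of rounds the target length up,
    # e.g. a max length of 97 with pad_to_multiple_of=10 becomes 100.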
| 334 | 1 |
import unittest

import numpy as np

from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    load_numpy,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)


if is_onnx_available():
    import onnxruntime as ort


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_inpaint_legacy(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
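
        # Note (added): the ("CUDAExecutionProvider", {...}) tuple above lets
        # onnxruntime cap GPU memory via "gpu_mem_limit" instead of growing the
        # arena unboundedly during the 15-step inference.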
| 371 |
'''simple docstring'''
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_CITATION = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n"
_DESCRIPTION = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n"
_KWARGS_DESCRIPTION = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Glue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "sst2",
            "mnli",
            "mnli_mismatched",
            "mnli_matched",
            "cola",
            "stsb",
            "mrpc",
            "qqp",
            "qnli",
            "rte",
            "wnli",
            "hans",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
| 114 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys
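    # Lazy loading note (added): replacing the module in sys.modules with a
    # _LazyModule defers the torch/flax imports until an attribute such as
    # SpeechEncoderDecoderModel is actually accessed.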
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 76 |
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
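
# Note (added): `slow_tokenizer_class` is the hook transformers uses to pair a
# fast tokenizer with its slow counterpart, e.g. for AutoTokenizer fallbacks.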
class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
    pass | 76 | 1 |
'''simple docstring'''
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
__lowercase : str = {
'gwf-440k': {
'url': 'https://model-server.zqevans2.workers.dev/gwf-440k.ckpt',
'sample_rate': 4_80_00,
'sample_size': 6_55_36,
},
'jmann-small-190k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt',
'sample_rate': 4_80_00,
'sample_size': 6_55_36,
},
'jmann-large-580k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt',
'sample_rate': 4_80_00,
'sample_size': 13_10_72,
},
'maestro-uncond-150k': {
'url': 'https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt',
'sample_rate': 1_60_00,
'sample_size': 6_55_36,
},
'unlocked-uncond-250k': {
'url': 'https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt',
'sample_rate': 1_60_00,
'sample_size': 6_55_36,
},
'honk-140k': {
'url': 'https://model-server.zqevans2.workers.dev/honk-140k.ckpt',
'sample_rate': 1_60_00,
'sample_size': 6_55_36,
},
}
def alpha_sigma_to_t(alpha, sigma):
    """Convert an (alpha, sigma) noise pair to a continuous timestep in [0, 1]."""
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
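
# Context (added note): these helpers implement the "crash schedule" used by
# v-diffusion samplers; t = atan2(sigma, alpha) * 2 / pi runs from 0 (clean
# signal, sigma = 0) to 1 (pure noise, alpha = 0).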
class Object(object):
    pass
class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()

        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)
def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"""wget {url} ./""")

    return f"""./{model_name}.ckpt"""
__lowercase : List[str] = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
}
__lowercase : Optional[int] = {
'8': 'resnets.0',
'9': 'attentions.0',
'10': 'resnets.1',
'11': 'attentions.1',
'12': 'resnets.2',
'13': 'attentions.2',
}
__lowercase : Optional[int] = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
'8': 'resnets.3',
'9': 'attentions.3',
'10': 'resnets.4',
'11': 'attentions.4',
'12': 'resnets.5',
'13': 'attentions.5',
}
__lowercase : Tuple = {
'0': 'resnets.0',
'1': 'resnets.1',
'2': 'resnets.2',
'4': 'resnets.0',
'5': 'resnets.1',
'6': 'resnets.2',
}
__lowercase : Union[str, Any] = {
'skip': 'conv_skip',
'main.0': 'conv_1',
'main.1': 'group_norm_1',
'main.3': 'conv_2',
'main.4': 'group_norm_2',
}
__lowercase : Any = {
'norm': 'group_norm',
'qkv_proj': ['query', 'key', 'value'],
'out_proj': ['proj_attn'],
}
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[int] ):
if name.startswith('skip' ):
return name.replace('skip' , RES_CONV_MAP['skip'] )
# name has to be of format main.{digit}
if not name.startswith('main.' ):
raise ValueError(F"""ResConvBlock error with {name}""" )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] ):
for key, value in ATTN_MAP.items():
if name.startswith(_SCREAMING_SNAKE_CASE ) and not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return name.replace(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif name.startswith(_SCREAMING_SNAKE_CASE ):
return [name.replace(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for v in value]
raise ValueError(F"""Attn error with {name}""" )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[int]=13 ):
__a : Optional[int] = input_string
if string.split('.' )[0] == "timestep_embed":
return string.replace('timestep_embed' , 'time_proj' )
__a : Dict = 0
if string.startswith('net.3.' ):
depth += 1
__a : str = string[6:]
elif string.startswith('net.' ):
__a : str = string[4:]
while string.startswith('main.7.' ):
depth += 1
__a : Optional[int] = string[7:]
if string.startswith('main.' ):
__a : Optional[Any] = string[5:]
# mid block
if string[:2].isdigit():
__a : Tuple = string[:2]
__a : Optional[int] = string[2:]
else:
__a : Optional[int] = string[0]
__a : Optional[Any] = string[1:]
if depth == max_depth:
__a : List[Any] = MID_NUM_TO_LAYER[layer_num]
__a : List[Any] = 'mid_block'
elif depth > 0 and int(_SCREAMING_SNAKE_CASE ) < 7:
__a : int = DOWN_NUM_TO_LAYER[layer_num]
__a : Dict = F"""down_blocks.{depth}"""
elif depth > 0 and int(_SCREAMING_SNAKE_CASE ) > 7:
__a : Tuple = UP_NUM_TO_LAYER[layer_num]
__a : Any = F"""up_blocks.{max_depth - depth - 1}"""
elif depth == 0:
__a : List[str] = DEPTH_0_TO_LAYER[layer_num]
__a : List[Any] = F"""up_blocks.{max_depth - 1}""" if int(_SCREAMING_SNAKE_CASE ) > 3 else 'down_blocks.0'
if not string_left.startswith('.' ):
raise ValueError(F"""Naming error with {input_string} and string_left: {string_left}.""" )
__a : Any = string_left[1:]
if "resnets" in new_layer:
__a : Dict = convert_resconv_naming(_SCREAMING_SNAKE_CASE )
elif "attentions" in new_layer:
__a : str = convert_attn_naming(_SCREAMING_SNAKE_CASE )
__a : Tuple = new_string_left
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__a : List[str] = prefix + '.' + new_layer + '.' + string_left
else:
__a : List[str] = [prefix + '.' + new_layer + '.' + s for s in string_left]
return new_string
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Any ):
__a : Optional[Any] = {}
for k, v in state_dict.items():
if k.endswith('kernel' ):
# up- and downsample layers, don't have trainable weights
continue
__a : Tuple = rename(_SCREAMING_SNAKE_CASE )
# check if we need to transform from Conv => Linear for attention
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__a : Union[str, Any] = transform_conv_attns(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
__a : List[Any] = v
return new_state_dict
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[Any] ):
if len(_SCREAMING_SNAKE_CASE ) == 1:
if len(v.shape ) == 3:
# weight
__a : List[Any] = v[:, :, 0]
else:
# bias
__a : Optional[int] = v
else:
# qkv matrices
__a : Optional[int] = v.shape[0]
__a : List[str] = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
__a : Dict = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
__a : Optional[Any] = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] ):
__a : str = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
__a : Union[str, Any] = args.model_path.split('/' )[-1].split('.' )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), F"""Make sure to provide one of the official model names {MODELS_MAP.keys()}"""
__a : List[Any] = download(_SCREAMING_SNAKE_CASE )
    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
__a : Any = diffusers_model.state_dict()
__a : Union[str, Any] = DiffusionUncond(_SCREAMING_SNAKE_CASE )
orig_model.load_state_dict(torch.load(args.model_path , map_location=_SCREAMING_SNAKE_CASE )['state_dict'] )
__a : int = orig_model.diffusion_ema.eval()
__a : Any = orig_model.state_dict()
__a : Optional[int] = rename_orig_weights(_SCREAMING_SNAKE_CASE )
__a : List[str] = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
__a : List[Any] = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(_SCREAMING_SNAKE_CASE ) == 0, F"""Problem with {renamed_minus_diffusers}"""
assert all(k.endswith('kernel' ) for k in list(_SCREAMING_SNAKE_CASE ) ), F"""Problem with {diffusers_minus_renamed}"""
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), F"""Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"""
if key == "time_proj.weight":
__a : Dict = value.squeeze()
__a : int = value
diffusers_model.load_state_dict(_SCREAMING_SNAKE_CASE )
__a : str = 100
__a : Optional[int] = 33
__a : str = IPNDMScheduler(num_train_timesteps=_SCREAMING_SNAKE_CASE )
__a : str = torch.manual_seed(_SCREAMING_SNAKE_CASE )
__a : List[Any] = torch.randn([1, 2, config.sample_size] , generator=_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE )
__a : List[str] = torch.linspace(1 , 0 , steps + 1 , device=_SCREAMING_SNAKE_CASE )[:-1]
__a : Optional[Any] = get_crash_schedule(_SCREAMING_SNAKE_CASE )
__a : Optional[int] = DanceDiffusionPipeline(unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE )
__a : Union[str, Any] = torch.manual_seed(33 )
__a : Any = pipe(num_inference_steps=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE ).audios
__a : Optional[int] = sampling.iplms_sample(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , {} )
__a : List[Any] = generated.clamp(-1 , 1 )
__a : List[Any] = (generated - audio).abs().sum()
__a : Union[str, Any] = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print('Diff sum' , _SCREAMING_SNAKE_CASE )
print('Diff max' , _SCREAMING_SNAKE_CASE )
assert diff_max < 1e-3, F"""Diff max: {diff_max} is too much :-/"""
print(F"""Conversion for {model_name} successful!""" )
if __name__ == "__main__":
__lowercase : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
__lowercase : List[Any] = parser.parse_args()
main(args)
| 294 |
'''simple docstring'''
import sys
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def str_eval(s: str) -> int:
    """Return the product of the digits in the string `s`."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in `n`."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product


if __name__ == "__main__":
    print(f'''{solution() = }''')
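    # Cross-check on a short window (added illustration): the product of the
    # digits "5678" is 5*6*7*8 = 1680.
    assert str_eval("5678") == 1680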
| 294 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json",
    "xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json",
    "xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json",
    "xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json",
    "xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json",
    "xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json",
    "xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json",
    "xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json",
    "xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json",
    "xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json",
}


class XLMConfig(PretrainedConfig):
    """Configuration class for XLM models."""

    model_type = "xlm"
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }

    def __init__(
        self,
        vocab_size=30145,
        emb_dim=2048,
        n_layers=12,
        n_heads=16,
        dropout=0.1,
        attention_dropout=0.1,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=1,
        use_lang_emb=True,
        max_position_embeddings=512,
        embed_init_std=2048**-0.5,
        layer_norm_eps=1e-12,
        init_std=0.02,
        bos_index=0,
        eos_index=1,
        pad_index=2,
        unk_index=3,
        mask_index=5,
        is_encoder=True,
        summary_type="first",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        mask_token_id=0,
        lang_id=0,
        pad_token_id=2,
        bos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id

        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)


class XLMOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
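
# Usage sketch (added; not part of the original configuration file):
#   config = XLMConfig(n_langs=15, use_lang_emb=True)
#   assert config.hidden_size == config.emb_dim  # resolved via attribute_map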
| 68 |
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class a__ :
"""simple docstring"""
__lowerCamelCase = BlenderbotSmallConfig
__lowerCamelCase = {}
__lowerCamelCase = 'gelu'
def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=False , lowercase=99 , lowercase=32 , lowercase=2 , lowercase=4 , lowercase=37 , lowercase=0.1 , lowercase=0.1 , lowercase=20 , lowercase=2 , lowercase=1 , lowercase=0 , ) -> Any:
'''simple docstring'''
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = eos_token_id
A__ = pad_token_id
A__ = bos_token_id
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
A__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
A__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
A__ = tf.concat([input_ids, eos_tensor] , axis=1 )
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
A__ = prepare_blenderbot_small_inputs_dict(lowercase , lowercase , lowercase )
return config, inputs_dict
def UpperCamelCase ( self , lowercase , lowercase ) -> str:
'''simple docstring'''
A__ = TFBlenderbotSmallModel(config=lowercase ).get_decoder()
A__ = inputs_dict["input_ids"]
A__ = input_ids[:1, :]
A__ = inputs_dict["attention_mask"][:1, :]
A__ = inputs_dict["head_mask"]
A__ = 1
# first forward pass
A__ = model(lowercase , attention_mask=lowercase , head_mask=lowercase , use_cache=lowercase )
A__ , A__ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
A__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
A__ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
A__ = tf.concat([input_ids, next_tokens] , axis=-1 )
A__ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
A__ = model(lowercase , attention_mask=lowercase )[0]
A__ = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
A__ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
A__ = output_from_no_past[:, -3:, random_slice_idx]
A__ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase , lowercase , rtol=1e-3 )
def prepare_blenderbot_small_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
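
# Note on the helper above (added): when the caller omits a mask, defaults are
# built from the pad token; the decoder_attention_mask always keeps the first
# decoder position visible and masks padding in the rest of the sequence.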
@require_tf
class a__ ( snake_case , snake_case , unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
__lowerCamelCase = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
__lowerCamelCase = (
{
'conversational': TFBlenderbotSmallForConditionalGeneration,
'feature-extraction': TFBlenderbotSmallModel,
'summarization': TFBlenderbotSmallForConditionalGeneration,
'text2text-generation': TFBlenderbotSmallForConditionalGeneration,
'translation': TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
__lowerCamelCase = True
__lowerCamelCase = False
__lowerCamelCase = False
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
A__ = TFBlenderbotSmallModelTester(self )
A__ = ConfigTester(self , config_class=lowercase )
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase )
@require_tokenizers
@require_tf
class a__ ( unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = [
'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '
' i\'m going to throw up.\nand why is that?'
]
__lowerCamelCase = 'facebook/blenderbot_small-90M'
@cached_property
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
@cached_property
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
A__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
A__ = self.tokenizer(self.src_text , return_tensors="tf" )
A__ = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=lowercase , )
A__ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowercase )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 68 | 1 |
'''simple docstring'''
import random


class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt text: each char code i gets a random key k and cipher (i + k) * k."""
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Invert encrypt: i = (c - k**2) / k."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)


if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
    print(c, k)
    print(Onepad().decrypt(c, k))
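

# Round-trip sanity sketch (an illustrative addition, not in the original):
# decrypt(encrypt(t)) must return t, since (i + k) * k - k**2 == i * k.
def _demo_onepad_roundtrip() -> None:
    cipher, key = Onepad.encrypt("Hello")
    assert Onepad.decrypt(cipher, key) == "Hello"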
| 351 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
# Dummy placeholders: constructing any of these classes raises an informative
# error explaining that the `sentencepiece` backend must be installed.
class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
| 16 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two equal-length binary strings if they differ in at most one
    position (the differing bit becomes '_'); return False otherwise."""
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """Repeatedly merge implicants that differ in exactly one bit; implicants
    that can no longer be merged are the prime implicants."""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is not False:
                    # both terms were absorbed into the merged implicant k
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    """Render each minterm as a fixed-width binary string."""
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    """True if implicant string1 covers minterm string2, i.e. they differ only
    in the `count` positions where string1 has '_'."""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Pick the essential prime implicants from the coverage chart, then cover
    any remaining minterms greedily."""
    temp = []
    select = [0] * len(chart)
    # a minterm covered by exactly one implicant makes that implicant essential
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # greedily take the implicant covering the most still-uncovered minterms
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Build the coverage chart: chart[i][j] == 1 iff implicant i covers minterm j."""
    chart = [[0 for _ in range(len(binary))] for _ in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
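

# Non-interactive sketch of the pipeline above (the minterms 0, 1, 2, 5 over
# three variables are illustrative sample data, not taken from the source):
def _demo_quine_mccluskey() -> None:
    binary = decimal_to_binary(3, [0, 1, 2, 5])  # ['000', '001', '010', '101']
    prime_implicants = check(binary)
    chart = prime_implicant_chart(prime_implicants, binary)
    essential = selection(chart, prime_implicants)
    assert set(essential) == {"0_0", "_01"}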
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 145 |
"""simple docstring"""
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    """Return True if the given string is a valid Sri Lankan mobile phone number."""
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$"
    )
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
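

# A few illustrative checks (sample numbers invented for demonstration):
def _demo_is_sri_lankan_phone_number() -> None:
    assert is_sri_lankan_phone_number("+94773283048")
    assert is_sri_lankan_phone_number("0718382399")
    assert not is_sri_lankan_phone_number("12345")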
| 66 | 0 |
def heaps(arr: list) -> list:
    """Return all permutations of `arr` using Heap's algorithm (pure Python)."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
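

# Sanity sketch (illustrative, not part of the original file): Heap's algorithm
# must produce exactly n! distinct permutations.
def _demo_heaps() -> None:
    perms = heaps([1, 2, 3])
    assert len(perms) == 6
    assert len(set(perms)) == 6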
| 359 |
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
lowercase_ = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}
lowercase_ = "zero2"
lowercase_ = "zero3"
lowercase_ = [ZEROa, ZEROa]
def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Dict:
'''simple docstring'''
A__ = parameterized.to_safe_name('_'.join(str(SCREAMING_SNAKE_CASE__ ) for x in param.args ) )
return f'{func.__name__}_{param_based_name}'
# Cartesian-product of zero stages with models to test
lowercase_ = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class A ( _UpperCAmelCase ):
"""simple docstring"""
@parameterized.expand(lowercase_,name_func=lowercase_ )
def snake_case__ ( self : int,lowercase_ : str,lowercase_ : Any )-> Optional[int]:
'''simple docstring'''
self.run_and_check(
stage=lowercase_,model=lowercase_,distributed=lowercase_,fpaa=lowercase_,)
@require_torch_multi_gpu
@parameterized.expand(lowercase_,name_func=lowercase_ )
def snake_case__ ( self : Union[str, Any],lowercase_ : Optional[Any],lowercase_ : List[Any] )-> int:
'''simple docstring'''
self.run_and_check(
stage=lowercase_,model=lowercase_,distributed=lowercase_,fpaa=lowercase_,)
@parameterized.expand(lowercase_,name_func=lowercase_ )
def snake_case__ ( self : List[str],lowercase_ : List[str],lowercase_ : List[Any] )-> Any:
'''simple docstring'''
self.run_and_check(
stage=lowercase_,model=lowercase_,distributed=lowercase_,fpaa=lowercase_,)
@require_torch_multi_gpu
@parameterized.expand(lowercase_,name_func=lowercase_ )
def snake_case__ ( self : Dict,lowercase_ : Optional[Any],lowercase_ : List[Any] )-> Optional[int]:
'''simple docstring'''
self.run_and_check(
stage=lowercase_,model=lowercase_,distributed=lowercase_,fpaa=lowercase_,)
def snake_case__ ( self : Tuple,lowercase_ : Any )-> Union[str, Any]:
'''simple docstring'''
pass
def snake_case__ ( self : int,lowercase_ : str,lowercase_ : str,lowercase_ : int = 1_0,lowercase_ : bool = True,lowercase_ : bool = True,lowercase_ : bool = True,)-> Union[str, Any]:
'''simple docstring'''
A__ = models[model]
A__ = self.run_trainer(
stage=lowercase_,model_name=lowercase_,eval_steps=lowercase_,num_train_epochs=1,distributed=lowercase_,fpaa=lowercase_,)
self.do_checks(lowercase_ )
return output_dir
def snake_case__ ( self : Union[str, Any],lowercase_ : str,lowercase_ : str,lowercase_ : int = 1_0,lowercase_ : int = 1,lowercase_ : bool = True,lowercase_ : bool = True,)-> Any:
'''simple docstring'''
A__ = self.get_auto_remove_tmp_dir('./xxx',after=lowercase_ )
A__ = F'\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(lowercase_ )}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n '.split()
if fpaa:
args.extend(['--fp16'] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
A__ = F'--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'.split()
A__ = [F'{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py']
A__ = self.get_launcher(lowercase_ )
A__ = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(lowercase_,env=self.get_env() )
return output_dir
def snake_case__ ( self : Any,lowercase_ : int=False )-> Tuple:
'''simple docstring'''
A__ = min(2,get_gpu_count() ) if distributed else 1
return F'deepspeed --num_nodes 1 --num_gpus {num_gpus}'.split()
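
# For reference, a hypothetical assembled command for a 2-GPU ZeRO-2 fp16 run
# would look roughly like (paths elided / illustrative):
#   deepspeed --num_nodes 1 --num_gpus 2 .../research_projects/wav2vec2/run_asr.py \
#       --model_name_or_path patrickvonplaten/wav2vec2_tiny_random --fp16 \
#       --deepspeed .../ds_config_wav2vec2_zero2.json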
| 282 | 0 |
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    """
    Greedy fractional knapsack: take items in decreasing value/weight ratio,
    splitting the last item so it exactly fills the remaining capacity.

    >>> fractional_knapsack([60, 100, 120], [10, 20, 30], 50)[0]
    240.0
    """
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
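

# Worked example (item data is illustrative): ratios are 6, 5 and 4, so item 0
# is taken whole, item 1 whole, and 20/30 of item 2 fills the remaining capacity.
def _demo_fractional_knapsack() -> None:
    max_value, fractions = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
    assert max_value == 240.0
    assert fractions == [1, 1, 20 / 30]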
if __name__ == "__main__":
import doctest
doctest.testmod()
| 149 |
from pathlib import Path
import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Warp `img` with the affine transform mapping the three points pt1 onto pt2."""
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
| 149 | 1 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class GitProcessor(ProcessorMixin):
    """Wraps an image processor and a tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"] | 190 |
'''simple docstring'''
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()
metric = load("accuracy")


def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)
class CustomCallback(TrainerCallback):
    def __init__(self, trainer):
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        # after every epoch, also evaluate on the training split
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy
def main():
    args = get_args()
    set_seed(args.seed)

    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model="accuracy",
        run_name="complexity-java",
        report_to="wandb",
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()
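
# Illustrative command line (script name and flag values are assumptions, not from the source):
#   python train_complexity_predictor.py --model_ckpt microsoft/unixcoder-base-nine --num_epochs 5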
if __name__ == "__main__":
main() | 190 | 1 |
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    """
    Friedmann expansion rate H(z) = H0 * sqrt(Or*(1+z)^4 + Om*(1+z)^3 + Ok*(1+z)^2 + OL),
    where the curvature density Ok is fixed by requiring the densities to sum to one.
    """
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")

    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)

        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )

        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
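

# Quick self-check (parameter values are illustrative): at redshift 0 the density
# terms sum to exactly 1, so H(0) must equal the Hubble constant itself.
def _demo_hubble_parameter() -> None:
    h0 = 68.3
    assert abs(hubble_parameter(h0, 1e-4, 0.3, 1 - 0.3 - 1e-4, 0) - h0) < 1e-9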
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 7 |
from __future__ import annotations
def mean(nums: list) -> float:
    """
    Find the mean of a list of numbers.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    >>> mean([])
    Traceback (most recent call last):
        ...
    ValueError: List is empty
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 337 | 0 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
    """A single training/test example for token classification."""

    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    """A single set of features of data; property names mirror the model inputs."""

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError
@staticmethod
def __lowercase ( lowerCamelCase : List[InputExample] , lowerCamelCase : List[str] , lowerCamelCase : int , lowerCamelCase : PreTrainedTokenizer , lowerCamelCase : List[str]=False , lowerCamelCase : Any="[CLS]" , lowerCamelCase : List[str]=1 , lowerCamelCase : List[str]="[SEP]" , lowerCamelCase : Optional[int]=False , lowerCamelCase : int=False , lowerCamelCase : Tuple=0 , lowerCamelCase : List[str]=0 , lowerCamelCase : Tuple=-1_00 , lowerCamelCase : Dict=0 , lowerCamelCase : Union[str, Any]=True , ) -> List[InputFeatures]:
lowerCAmelCase_ : Dict = {label: i for i, label in enumerate(lowerCamelCase )}
lowerCAmelCase_ : Dict = []
for ex_index, example in enumerate(lowerCamelCase ):
if ex_index % 1_00_00 == 0:
logger.info("""Writing example %d of %d""" , lowerCamelCase , len(lowerCamelCase ) )
lowerCAmelCase_ : List[str] = []
lowerCAmelCase_ : Union[str, Any] = []
for word, label in zip(example.words , example.labels ):
lowerCAmelCase_ : str = tokenizer.tokenize(lowerCamelCase )
# bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
if len(lowerCamelCase ) > 0:
tokens.extend(lowerCamelCase )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(lowerCamelCase ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
lowerCAmelCase_ : Dict = tokenizer.num_special_tokens_to_add()
if len(lowerCamelCase ) > max_seq_length - special_tokens_count:
lowerCAmelCase_ : int = tokens[: (max_seq_length - special_tokens_count)]
lowerCAmelCase_ : List[str] = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
lowerCAmelCase_ : Dict = [sequence_a_segment_id] * len(lowerCamelCase )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
lowerCAmelCase_ : Union[str, Any] = [cls_token] + tokens
lowerCAmelCase_ : Union[str, Any] = [pad_token_label_id] + label_ids
lowerCAmelCase_ : int = [cls_token_segment_id] + segment_ids
lowerCAmelCase_ : Dict = tokenizer.convert_tokens_to_ids(lowerCamelCase )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
lowerCAmelCase_ : Optional[Any] = [1 if mask_padding_with_zero else 0] * len(lowerCamelCase )
# Zero-pad up to the sequence length.
lowerCAmelCase_ : Tuple = max_seq_length - len(lowerCamelCase )
if pad_on_left:
lowerCAmelCase_ : List[str] = ([pad_token] * padding_length) + input_ids
lowerCAmelCase_ : Tuple = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
lowerCAmelCase_ : Union[str, Any] = ([pad_token_segment_id] * padding_length) + segment_ids
lowerCAmelCase_ : List[str] = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(lowerCamelCase ) == max_seq_length
assert len(lowerCamelCase ) == max_seq_length
assert len(lowerCamelCase ) == max_seq_length
assert len(lowerCamelCase ) == max_seq_length
if ex_index < 5:
logger.info("""*** Example ***""" )
logger.info("""guid: %s""" , example.guid )
logger.info("""tokens: %s""" , """ """.join([str(lowerCamelCase ) for x in tokens] ) )
logger.info("""input_ids: %s""" , """ """.join([str(lowerCamelCase ) for x in input_ids] ) )
logger.info("""input_mask: %s""" , """ """.join([str(lowerCamelCase ) for x in input_mask] ) )
logger.info("""segment_ids: %s""" , """ """.join([str(lowerCamelCase ) for x in segment_ids] ) )
logger.info("""label_ids: %s""" , """ """.join([str(lowerCamelCase ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
lowerCAmelCase_ : int = None
features.append(
InputFeatures(
input_ids=lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , label_ids=lowerCamelCase ) )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class __snake_case ( _SCREAMING_SNAKE_CASE):
"""simple docstring"""
lowercase = 42
lowercase = nn.CrossEntropyLoss().ignore_index
def __init__( self : List[Any] , lowerCamelCase : TokenClassificationTask , lowerCamelCase : str , lowerCamelCase : PreTrainedTokenizer , lowerCamelCase : List[str] , lowerCamelCase : str , lowerCamelCase : Optional[int] = None , lowerCamelCase : List[Any]=False , lowerCamelCase : Split = Split.train , ) -> Union[str, Any]:
# Load data features from cache or dataset file
lowerCAmelCase_ : Optional[int] = os.path.join(
lowerCamelCase , """cached_{}_{}_{}""".format(mode.value , tokenizer.__class__.__name__ , str(lowerCamelCase ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCAmelCase_ : Any = cached_features_file + """.lock"""
with FileLock(lowerCamelCase ):
if os.path.exists(lowerCamelCase ) and not overwrite_cache:
logger.info(F'Loading features from cached file {cached_features_file}' )
lowerCAmelCase_ : int = torch.load(lowerCamelCase )
else:
logger.info(F'Creating features from dataset file at {data_dir}' )
lowerCAmelCase_ : Optional[Any] = token_classification_task.read_examples_from_file(lowerCamelCase , lowerCamelCase )
# TODO clean up all this to leverage built-in features of tokenizers
lowerCAmelCase_ : str = token_classification_task.convert_examples_to_features(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , cls_token_at_end=bool(model_type in ["""xlnet"""] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=lowerCamelCase , pad_on_left=bool(tokenizer.padding_side == """left""" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(F'Saving features into cached file {cached_features_file}' )
torch.save(self.features , lowerCamelCase )
def __len__( self : Any ) -> Dict:
return len(self.features )
def __getitem__( self : Union[str, Any] , lowerCamelCase : List[str] ) -> InputFeatures:
return self.features[i]
if is_tf_available():
import tensorflow as tf
class __snake_case :
"""simple docstring"""
lowercase = 42
lowercase = -1_00
def __init__( self : Union[str, Any] , lowerCamelCase : TokenClassificationTask , lowerCamelCase : str , lowerCamelCase : PreTrainedTokenizer , lowerCamelCase : List[str] , lowerCamelCase : str , lowerCamelCase : Optional[int] = None , lowerCamelCase : str=False , lowerCamelCase : Split = Split.train , ) -> Optional[Any]:
lowerCAmelCase_ : Dict = token_classification_task.read_examples_from_file(lowerCamelCase , lowerCamelCase )
# TODO clean up all this to leverage built-in features of tokenizers
lowerCAmelCase_ : str = token_classification_task.convert_examples_to_features(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , cls_token_at_end=bool(model_type in ["""xlnet"""] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=lowerCamelCase , pad_on_left=bool(tokenizer.padding_side == """left""" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
lowerCAmelCase_ : Tuple = tf.data.Dataset.from_generator(
lowerCamelCase , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa}, tf.intaa) , (
{"""input_ids""": tf.TensorShape([None] ), """attention_mask""": tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
lowerCAmelCase_ : str = tf.data.Dataset.from_generator(
lowerCamelCase , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa, """token_type_ids""": tf.intaa}, tf.intaa) , (
{
"""input_ids""": tf.TensorShape([None] ),
"""attention_mask""": tf.TensorShape([None] ),
"""token_type_ids""": tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def __lowercase ( self : Tuple ) -> str:
lowerCAmelCase_ : List[Any] = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self : List[Any] ) -> int:
return len(self.features )
def __getitem__( self : str , lowerCamelCase : Union[str, Any] ) -> InputFeatures:
return self.features[i]
| 89 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block


@dataclass
class UNet1DOutput(BaseOutput):
    """
    Args:
        sample (`torch.FloatTensor` of shape `(batch_size, sample_size, num_channels)`):
            The hidden states output from the last layer of the model.
    """

    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    """A 1D UNet that maps a noisy sample and a timestep to a sample-shaped output."""
@register_to_config
def __init__( self : Optional[int] , lowerCamelCase : int = 6_55_36 , lowerCamelCase : Optional[int] = None , lowerCamelCase : int = 2 , lowerCamelCase : int = 2 , lowerCamelCase : int = 0 , lowerCamelCase : str = "fourier" , lowerCamelCase : bool = True , lowerCamelCase : bool = False , lowerCamelCase : float = 0.0 , lowerCamelCase : Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , lowerCamelCase : Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , lowerCamelCase : Tuple[str] = "UNetMidBlock1D" , lowerCamelCase : str = None , lowerCamelCase : Tuple[int] = (32, 32, 64) , lowerCamelCase : str = None , lowerCamelCase : int = 8 , lowerCamelCase : int = 1 , lowerCamelCase : bool = False , ) -> List[Any]:
super().__init__()
lowerCAmelCase_ : Optional[Any] = sample_size
# time
if time_embedding_type == "fourier":
lowerCAmelCase_ : int = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=lowerCamelCase , log=lowerCamelCase , flip_sin_to_cos=lowerCamelCase )
lowerCAmelCase_ : List[Any] = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
lowerCAmelCase_ : Optional[int] = Timesteps(
block_out_channels[0] , flip_sin_to_cos=lowerCamelCase , downscale_freq_shift=lowerCamelCase )
lowerCAmelCase_ : Union[str, Any] = block_out_channels[0]
if use_timestep_embedding:
lowerCAmelCase_ : List[str] = block_out_channels[0] * 4
lowerCAmelCase_ : List[str] = TimestepEmbedding(
in_channels=lowerCamelCase , time_embed_dim=lowerCamelCase , act_fn=lowerCamelCase , out_dim=block_out_channels[0] , )
lowerCAmelCase_ : str = nn.ModuleList([] )
lowerCAmelCase_ : Tuple = None
lowerCAmelCase_ : str = nn.ModuleList([] )
lowerCAmelCase_ : List[str] = None
# down
lowerCAmelCase_ : List[str] = in_channels
for i, down_block_type in enumerate(lowerCamelCase ):
lowerCAmelCase_ : Optional[Any] = output_channel
lowerCAmelCase_ : str = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
lowerCAmelCase_ : int = i == len(lowerCamelCase ) - 1
lowerCAmelCase_ : List[str] = get_down_block(
lowerCamelCase , num_layers=lowerCamelCase , in_channels=lowerCamelCase , out_channels=lowerCamelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(lowerCamelCase )
# mid
lowerCAmelCase_ : Dict = get_mid_block(
lowerCamelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=lowerCamelCase , add_downsample=lowerCamelCase , )
# up
lowerCAmelCase_ : List[str] = list(reversed(lowerCamelCase ) )
lowerCAmelCase_ : str = reversed_block_out_channels[0]
if out_block_type is None:
lowerCAmelCase_ : Optional[int] = out_channels
else:
lowerCAmelCase_ : Optional[Any] = block_out_channels[0]
for i, up_block_type in enumerate(lowerCamelCase ):
lowerCAmelCase_ : Any = output_channel
lowerCAmelCase_ : str = (
reversed_block_out_channels[i + 1] if i < len(lowerCamelCase ) - 1 else final_upsample_channels
)
lowerCAmelCase_ : int = i == len(lowerCamelCase ) - 1
lowerCAmelCase_ : Any = get_up_block(
lowerCamelCase , num_layers=lowerCamelCase , in_channels=lowerCamelCase , out_channels=lowerCamelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(lowerCamelCase )
lowerCAmelCase_ : Optional[int] = output_channel
# out
lowerCAmelCase_ : str = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
lowerCAmelCase_ : Union[str, Any] = get_out_block(
out_block_type=lowerCamelCase , num_groups_out=lowerCamelCase , embed_dim=block_out_channels[0] , out_channels=lowerCamelCase , act_fn=lowerCamelCase , fc_dim=block_out_channels[-1] // 4 , )
def __lowercase ( self : List[Any] , lowerCamelCase : torch.FloatTensor , lowerCamelCase : Union[torch.Tensor, float, int] , lowerCamelCase : bool = True , ) -> Union[UNetaDOutput, Tuple]:
lowerCAmelCase_ : Optional[Any] = timestep
if not torch.is_tensor(lowerCamelCase ):
lowerCAmelCase_ : Any = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(lowerCamelCase ) and len(timesteps.shape ) == 0:
lowerCAmelCase_ : Dict = timesteps[None].to(sample.device )
lowerCAmelCase_ : int = self.time_proj(lowerCamelCase )
if self.config.use_timestep_embedding:
lowerCAmelCase_ : int = self.time_mlp(lowerCamelCase )
else:
lowerCAmelCase_ : Tuple = timestep_embed[..., None]
lowerCAmelCase_ : Dict = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
lowerCAmelCase_ : Union[str, Any] = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
lowerCAmelCase_ : Optional[Any] = ()
for downsample_block in self.down_blocks:
lowerCAmelCase_, lowerCAmelCase_ : Any = downsample_block(hidden_states=lowerCamelCase , temb=lowerCamelCase )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
lowerCAmelCase_ : int = self.mid_block(lowerCamelCase , lowerCamelCase )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
lowerCAmelCase_ : str = down_block_res_samples[-1:]
lowerCAmelCase_ : Union[str, Any] = down_block_res_samples[:-1]
lowerCAmelCase_ : List[str] = upsample_block(lowerCamelCase , res_hidden_states_tuple=lowerCamelCase , temb=lowerCamelCase )
# 5. post-process
if self.out_block:
lowerCAmelCase_ : Union[str, Any] = self.out_block(lowerCamelCase , lowerCamelCase )
if not return_dict:
return (sample,)
        return UNet1DOutput(sample=sample)
| 89 | 1 |
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
def __init__( self: Optional[int] , _lowerCAmelCase: Union[str, Any] , _lowerCAmelCase: str=13 , _lowerCAmelCase: str=7 , _lowerCAmelCase: List[str]=6 , _lowerCAmelCase: List[Any]=17 , _lowerCAmelCase: Any=23 , _lowerCAmelCase: str=11 , _lowerCAmelCase: List[str]=True , ):
lowercase :Union[str, Any] = parent
lowercase :List[str] = batch_size
lowercase :List[str] = seq_length
lowercase :Any = act_dim
lowercase :int = state_dim
lowercase :Tuple = hidden_size
lowercase :Optional[int] = max_length
lowercase :List[str] = is_training
def SCREAMING_SNAKE_CASE ( self: Optional[int] ):
lowercase :Dict = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
lowercase :Dict = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
lowercase :Dict = floats_tensor((self.batch_size, self.seq_length, 1) )
lowercase :Optional[Any] = floats_tensor((self.batch_size, self.seq_length, 1) )
lowercase :str = ids_tensor((self.batch_size, self.seq_length) , vocab_size=10_00 )
lowercase :Tuple = random_attention_mask((self.batch_size, self.seq_length) )
lowercase :Tuple = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def SCREAMING_SNAKE_CASE ( self: Union[str, Any] ):
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _lowerCAmelCase: Any , _lowerCAmelCase: List[str] , _lowerCAmelCase: Dict , _lowerCAmelCase: Union[str, Any] , _lowerCAmelCase: List[Any] , _lowerCAmelCase: Optional[int] , _lowerCAmelCase: List[Any] , ):
lowercase :List[str] = DecisionTransformerModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase :List[str] = model(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modalities: states, returns and actions
def SCREAMING_SNAKE_CASE ( self: Optional[Any] ):
lowercase :Tuple = self.prepare_config_and_inputs()
(
(
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) ,
) :Tuple = config_and_inputs
lowercase :List[Any] = {
"states": states,
"actions": actions,
"rewards": rewards,
"returns_to_go": returns_to_go,
"timesteps": timesteps,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
_a = (DecisionTransformerModel,) if is_torch_available() else ()
_a = ()
_a = {'''feature-extraction''': DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
_a = False
# Ignoring of failing tests from ModelTesterMixin, as the model does not implement these features
_a = False
_a = False
_a = False
_a = False
_a = False
_a = False
_a = False
_a = False
_a = False
def SCREAMING_SNAKE_CASE ( self: List[Any] ):
lowercase :List[Any] = DecisionTransformerModelTester(self )
lowercase :Optional[int] = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=37 )
def SCREAMING_SNAKE_CASE ( self: Tuple ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self: int ):
lowercase :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
@slow
def SCREAMING_SNAKE_CASE ( self: List[str] ):
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase :Optional[int] = DecisionTransformerModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: Dict ):
lowercase , lowercase :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase :Optional[Any] = model_class(_lowerCAmelCase )
lowercase :Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase :int = [*signature.parameters.keys()]
lowercase :int = [
"states",
"actions",
"rewards",
"returns_to_go",
"timesteps",
"attention_mask",
]
self.assertListEqual(arg_names[: len(_lowerCAmelCase )] , _lowerCAmelCase )
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
@slow
def SCREAMING_SNAKE_CASE ( self: Any ):
lowercase :str = 2 # number of steps of autoregressive prediction we will perform
lowercase :int = 10 # defined by the RL environment, may be normalized
lowercase :List[str] = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" )
lowercase :List[str] = model.to(_lowerCAmelCase )
lowercase :List[str] = model.config
torch.manual_seed(0 )
lowercase :Tuple = torch.randn(1 , 1 , config.state_dim ).to(device=_lowerCAmelCase , dtype=torch.floataa ) # env.reset()
lowercase :List[Any] = torch.tensor(
[[0.24_27_93, -0.28_69_30_74, 0.8_74_26_13], [0.67_81_52_74, -0.08_10_10_85, -0.12_95_21_47]] , device=_lowerCAmelCase )
lowercase :Dict = torch.tensor(_lowerCAmelCase , device=_lowerCAmelCase , dtype=torch.floataa ).reshape(1 , 1 , 1 )
lowercase :Union[str, Any] = state
lowercase :Optional[int] = torch.zeros(1 , 0 , config.act_dim , device=_lowerCAmelCase , dtype=torch.floataa )
lowercase :Union[str, Any] = torch.zeros(1 , 0 , device=_lowerCAmelCase , dtype=torch.floataa )
lowercase :Optional[int] = torch.tensor(0 , device=_lowerCAmelCase , dtype=torch.long ).reshape(1 , 1 )
for step in range(_lowerCAmelCase ):
lowercase :Tuple = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=_lowerCAmelCase )] , dim=1 )
lowercase :str = torch.cat([rewards, torch.zeros(1 , 1 , device=_lowerCAmelCase )] , dim=1 )
lowercase :str = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
lowercase , lowercase , lowercase :Dict = model(
states=_lowerCAmelCase , actions=_lowerCAmelCase , rewards=_lowerCAmelCase , returns_to_go=_lowerCAmelCase , timesteps=_lowerCAmelCase , attention_mask=_lowerCAmelCase , return_dict=_lowerCAmelCase , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) )
lowercase , lowercase , lowercase , lowercase :List[Any] = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=_lowerCAmelCase , dtype=torch.floataa ),
1.0,
False,
{},
)
lowercase :int = action_pred[0, -1]
lowercase :str = torch.cat([states, state] , dim=1 )
lowercase :str = returns_to_go[0, -1] - reward
lowercase :int = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
lowercase :Union[str, Any] = torch.cat(
[timesteps, torch.ones((1, 1) , device=_lowerCAmelCase , dtype=torch.long ) * (step + 1)] , dim=1 )
| 236 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_UpperCAmelCase : int = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swin"] = [
"SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwinForImageClassification",
"SwinForMaskedImageModeling",
"SwinModel",
"SwinPreTrainedModel",
"SwinBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_swin"] = [
"TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSwinForImageClassification",
"TFSwinForMaskedImageModeling",
"TFSwinModel",
"TFSwinPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
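# With the lazy structure above, a statement like
#   from transformers.models.swin import SwinModel
# resolves the symbol on first access, deferring the heavy torch import until then.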
| 236 | 1 |
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
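
# A minimal usage sketch, not one of the original tests: patch_submodule
# temporarily replaces an attribute reachable through a module and restores the
# original when the context manager exits. The fake_join helper is illustrative.
def example_patch_submodule_usage():
    import os

    def fake_join(*parts):
        return "/".join(parts)

    with patch_submodule(_test_patching, "os.path.join", fake_join):
        assert _test_patching.os.path.join("a", "b") == "a/b"
    # the real function is restored once the context exits
    assert _test_patching.os.path.join is os.path.join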
def test_patch_submodule_builtin():
    assert _test_patching.open is open

    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open
def test_patch_submodule_missing():
    # pandas.read_csv is not present in _test_patching
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass
def test_patch_submodule_missing_builtin():
    # builtins should always be mocked even if they're not in the globals,
    # in case they're loaded at one point
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)

    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open
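
# A sketch (helper name assumed) of the start()/stop() form in a fixture-style
# setup, mirroring unittest.mock patchers: handy when a `with` block can't wrap
# the code under test.
def example_start_stop_fixture():
    patcher = patch_submodule(_test_patching, "open", "__fixture_mock__")
    patcher.start()  # patch stays active until stop() is called
    try:
        assert _test_patching.open == "__fixture_mock__"
    finally:
        patcher.stop()  # always restore the original builtin
    assert _test_patching.open is open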
def test_patch_submodule_successive():
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
| 365 |
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"
def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()
def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")
def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)
def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"
def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"
def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
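
# A usage sketch with assumed values (POSIX path separator assumed): shard
# filenames embed the shard index and total shard count, zero-padded to five
# digits.
def _example_shard_filenames():
    filenames = filenames_for_dataset_split(
        "/data", "squad", "train", filetype_suffix="arrow", shard_lengths=[100, 100]
    )
    assert filenames == [
        "/data/squad-train-00000-of-00002.arrow",
        "/data/squad-train-00001-of-00002.arrow",
    ]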
| 131 | 0 |